Merge branch 'drm_iommu_v15' of https://github.com/markyzq/kernel-drm-rockchip into...
author Dave Airlie <airlied@redhat.com>
Mon, 8 Dec 2014 03:45:18 +0000 (13:45 +1000)
committer Dave Airlie <airlied@redhat.com>
Mon, 8 Dec 2014 03:45:18 +0000 (13:45 +1000)
Merge rockchip GPU support.

This has a branch in common with the iommu tree; hopefully the
process works.

* 'drm_iommu_v15' of https://github.com/markyzq/kernel-drm-rockchip:
  dt-bindings: video: Add documentation for rockchip vop
  dt-bindings: video: Add documentation for rockchip display subsystem
  drm: rockchip: Add basic drm driver
  dt-bindings: iommu: Add documentation for rockchip iommu
  iommu/rockchip: rk3288 iommu driver

1355 files changed:
CREDITS
Documentation/DocBook/drm.tmpl
Documentation/devicetree/bindings/ata/sata_rcar.txt
Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt [moved from Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt with 100% similarity]
Documentation/devicetree/bindings/drm/imx/hdmi.txt [moved from Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt with 100% similarity]
Documentation/devicetree/bindings/drm/imx/ldb.txt [moved from Documentation/devicetree/bindings/staging/imx-drm/ldb.txt with 100% similarity]
Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
Documentation/devicetree/bindings/panel/auo,b116xw03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/pci.txt
Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
Documentation/devicetree/bindings/thermal/rcar-thermal.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/video/adi,adv7511.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/exynos_dsim.txt
Documentation/devicetree/bindings/video/samsung-fimd.txt
Documentation/filesystems/overlayfs.txt
Documentation/input/elantech.txt
Documentation/kernel-parameters.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/timestamping.txt
Documentation/video4linux/vivid.txt
MAINTAINERS
Makefile
arch/arm/Kconfig.debug
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/exynos5250-snow.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7779-marzen.dts
arch/arm/boot/dts/r8a7779.dtsi
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi [new file with mode: 0644]
arch/arm/boot/dts/sama5d31.dtsi
arch/arm/boot/dts/sama5d33.dtsi
arch/arm/boot/dts/sama5d34.dtsi
arch/arm/boot/dts/sama5d35.dtsi
arch/arm/boot/dts/sama5d36.dtsi
arch/arm/boot/dts/sama5d3xcm.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/tegra114-dalmore.dts
arch/arm/boot/dts/tegra114-roth.dts
arch/arm/boot/dts/tegra114-tn7.dts
arch/arm/boot/dts/tegra114.dtsi
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/boot/dts/tegra124-nyan-big.dts
arch/arm/boot/dts/tegra124-venice2.dts
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/tegra20-harmony.dts
arch/arm/boot/dts/tegra20-iris-512.dts
arch/arm/boot/dts/tegra20-medcom-wide.dts
arch/arm/boot/dts/tegra20-paz00.dts
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/boot/dts/tegra20-tamonten.dtsi
arch/arm/boot/dts/tegra20-trimslice.dts
arch/arm/boot/dts/tegra20-ventana.dts
arch/arm/boot/dts/tegra20-whistler.dts
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30-apalis-eval.dts
arch/arm/boot/dts/tegra30-beaver.dts
arch/arm/boot/dts/tegra30-cardhu.dtsi
arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
arch/arm/boot/dts/tegra30.dtsi
arch/arm/boot/dts/vf610-cosmic.dts
arch/arm/boot/dts/zynq-parallella.dts
arch/arm/common/edma.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/socfpga_defconfig
arch/arm/include/asm/thread_info.h
arch/arm/kernel/traps.c
arch/arm/kvm/mmu.c
arch/arm/mach-imx/clk-vf610.c
arch/arm/mach-ixp4xx/include/mach/io.h
arch/arm/mach-mvebu/board-v7.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-pxa/include/mach/addr-map.h
arch/arm/mach-shmobile/board-ape6evm-reference.c
arch/arm/mach-shmobile/board-ape6evm.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-bockw-reference.c
arch/arm/mach-shmobile/board-bockw.c
arch/arm/mach-shmobile/board-koelsch-reference.c
arch/arm/mach-shmobile/board-koelsch.c
arch/arm/mach-shmobile/board-kzm9g-reference.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-lager-reference.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/board-marzen-reference.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/mach-shmobile/clock-r8a73a4.c
arch/arm/mach-shmobile/clock-r8a7740.c
arch/arm/mach-shmobile/clock-r8a7778.c
arch/arm/mach-shmobile/clock-r8a7779.c
arch/arm/mach-shmobile/clock-r8a7790.c
arch/arm/mach-shmobile/clock-r8a7791.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/console.c
arch/arm/mach-shmobile/headsmp-scu.S
arch/arm/mach-shmobile/intc-sh7372.c
arch/arm/mach-shmobile/intc-sh73a0.c
arch/arm/mach-shmobile/r8a7740.h
arch/arm/mach-shmobile/r8a7778.h
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-shmobile/setup-r7s72100.c
arch/arm/mach-shmobile/setup-r8a73a4.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-r8a7778.c
arch/arm/mach-shmobile/setup-r8a7779.c
arch/arm/mach-shmobile/setup-r8a7790.c
arch/arm/mach-shmobile/setup-r8a7791.c
arch/arm/mach-shmobile/setup-rcar-gen2.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mach-shmobile/sleep-sh7372.S
arch/arm/mach-shmobile/smp-emev2.c
arch/arm/mach-shmobile/smp-r8a7779.c
arch/arm/mach-shmobile/smp-sh73a0.c
arch/arm/mach-shmobile/timer.c
arch/arm/mach-tegra/irq.c
arch/arm/mm/Kconfig
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xscale.S
arch/arm/plat-orion/gpio.c
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/psci.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/clear_user.S
arch/arm64/mm/mmu.c
arch/ia64/kvm/kvm-ia64.c
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/include/asm/asmmacro-32.h
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/fpregdef.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/jump_label.h
arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/uaccess.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/bmips_vec.S
arch/mips/kernel/branch.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/genex.S
arch/mips/kernel/jump_label.c
arch/mips/kernel/r2300_fpu.S
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/r6000_fpu.S
arch/mips/kernel/rtlx.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/lib/memcpy.S
arch/mips/lib/r3k_dump_tlb.c
arch/mips/lib/strnlen_user.S
arch/mips/loongson/common/Makefile
arch/mips/loongson/loongson-3/numa.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-sead3/sead3-leds.c
arch/mips/netlogic/xlp/Makefile
arch/mips/oprofile/backtrace.c
arch/mips/pci/msi-xlp.c
arch/mips/sgi-ip27/ip27-memory.c
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/bitsperlong.h
arch/parisc/include/uapi/asm/msgbuf.h
arch/parisc/include/uapi/asm/sembuf.h
arch/parisc/include/uapi/asm/shmbuf.h
arch/parisc/include/uapi/asm/signal.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/syscall_table.S
arch/powerpc/include/asm/fadump.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/kernel/eeh_sysfs.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/vdso32/getcpu.S
arch/powerpc/mm/init_32.c
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/powernv/opal-lpc.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/xmon/xmon.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/kernel/ftrace.c
arch/s390/kernel/nmi.c
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vtime.c
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/dma-mapping.h
arch/sparc/include/uapi/asm/swab.h
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/smp_64.c
arch/sparc/lib/atomic32.c
arch/x86/Kconfig
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/mkpiggy.c
arch/x86/include/asm/page_32_types.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/traps.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/microcode/amd_early.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/core_early.c
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/ptrace.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/lib/csum-wrappers_64.c
arch/x86/mm/init_64.c
arch/x86/tools/calc_run_size.pl [new file with mode: 0644]
arch/x86/xen/smp.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/lx200mx.dts [new file with mode: 0644]
arch/xtensa/configs/generic_kc705_defconfig [new file with mode: 0644]
arch/xtensa/configs/smp_lx200_defconfig [new file with mode: 0644]
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/uapi/asm/unistd.h
block/bio-integrity.c
block/blk-merge.c
block/blk-mq.c
block/ioprio.c
block/scsi_ioctl.c
drivers/acpi/blacklist.c
drivers/acpi/device_pm.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/libahci.c
drivers/ata/sata_fsl.c
drivers/ata/sata_rcar.c
drivers/atm/solos-pci.c
drivers/base/Kconfig
drivers/base/core.c
drivers/base/power/domain.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/char/agp/intel-gtt.c
drivers/char/hw_random/pseries-rng.c
drivers/char/virtio_console.c
drivers/clk/at91/clk-usb.c
drivers/clk/clk-divider.c
drivers/clk/pxa/clk-pxa27x.c
drivers/clk/qcom/mmcc-apq8084.c
drivers/clk/rockchip/clk.c
drivers/clocksource/sun4i_timer.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/crypto/caam/key_gen.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_transport.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/qat/qat_common/qat_crypto.c
drivers/crypto/qat/qat_dh895xcc/adf_admin.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
drivers/dma/edma.c
drivers/dma/pl330.c
drivers/dma/sun6i-dma.c
drivers/firewire/core-cdev.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/README.drm [deleted file]
drivers/gpu/drm/amd/amdkfd/Kconfig [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/Makefile [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/cik_regs.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_crat.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_device.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_module.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_priv.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_process.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_queue.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_topology.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_topology.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/kgd_kfd_interface.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/bochs/bochs_fbdev.c
drivers/gpu/drm/bochs/bochs_hw.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_atomic.c [new file with mode: 0644]
drivers/gpu/drm/drm_atomic_helper.c [new file with mode: 0644]
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_flip_work.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_mipi_dsi.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_dp_core.h
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/Makefile
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_intel_display.c
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/i2c/Kconfig
drivers/gpu/drm/i2c/Makefile
drivers/gpu/drm/i2c/adv7511.c [new file with mode: 0644]
drivers/gpu/drm/i2c/adv7511.h [new file with mode: 0644]
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_ums.c
drivers/gpu/drm/i915/intel_audio.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_frontbuffer.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_renderstate.h
drivers/gpu/drm/i915/intel_renderstate_gen8.c
drivers/gpu/drm/i915/intel_renderstate_gen9.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/Kconfig [moved from drivers/staging/imx-drm/Kconfig with 100% similarity]
drivers/gpu/drm/imx/Makefile [moved from drivers/staging/imx-drm/Makefile with 100% similarity]
drivers/gpu/drm/imx/imx-drm-core.c [moved from drivers/staging/imx-drm/imx-drm-core.c with 99% similarity]
drivers/gpu/drm/imx/imx-drm.h [moved from drivers/staging/imx-drm/imx-drm.h with 100% similarity]
drivers/gpu/drm/imx/imx-hdmi.c [moved from drivers/staging/imx-drm/imx-hdmi.c with 100% similarity]
drivers/gpu/drm/imx/imx-hdmi.h [moved from drivers/staging/imx-drm/imx-hdmi.h with 100% similarity]
drivers/gpu/drm/imx/imx-ldb.c [moved from drivers/staging/imx-drm/imx-ldb.c with 100% similarity]
drivers/gpu/drm/imx/imx-tve.c [moved from drivers/staging/imx-drm/imx-tve.c with 100% similarity]
drivers/gpu/drm/imx/ipuv3-crtc.c [moved from drivers/staging/imx-drm/ipuv3-crtc.c with 100% similarity]
drivers/gpu/drm/imx/ipuv3-plane.c [moved from drivers/staging/imx-drm/ipuv3-plane.c with 100% similarity]
drivers/gpu/drm/imx/ipuv3-plane.h [moved from drivers/staging/imx-drm/ipuv3-plane.h with 100% similarity]
drivers/gpu/drm/imx/parallel-display.c [moved from drivers/staging/imx-drm/parallel-display.c with 100% similarity]
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx.xml.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a4xx_gpu.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a4xx_gpu.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/adreno_common.xml.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
drivers/gpu/drm/msm/dsi/sfpb.xml.h
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi.xml.h
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
drivers/gpu/drm/msm/hdmi/qfprom.xml.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
drivers/gpu/drm/msm/msm_atomic.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/core/handle.c
drivers/gpu/drm/nouveau/core/engine/device/base.c
drivers/gpu/drm/nouveau/core/engine/device/gm100.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
drivers/gpu/drm/nouveau/core/engine/disp/gm204.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
drivers/gpu/drm/nouveau/core/engine/disp/outp.c
drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/include/core/device.h
drivers/gpu/drm/nouveau/core/include/core/handle.h
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/include/engine/disp.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
drivers/gpu/drm/nouveau/core/include/subdev/volt.h
drivers/gpu/drm/nouveau/core/os.h
drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
drivers/gpu/drm/nouveau/core/subdev/bios/image.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/bios/npde.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
drivers/gpu/drm/nouveau/core/subdev/volt/base.c
drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/overlay.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nouveau_platform.h
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nv17_fence.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvif/class.h
drivers/gpu/drm/nouveau/nvif/client.c
drivers/gpu/drm/nouveau/nvif/driver.h
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-ld9040.c
drivers/gpu/drm/panel/panel-s6e8aa0.c
drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atom.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_i2c.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/ci_dpm.h
drivers/gpu/drm/radeon/ci_smc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_reg.h
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_dma.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/ppsmc.h
drivers/gpu/drm/radeon/pptable.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_dpm.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_ib.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kfd.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_kfd.h [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/radeon_sync.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_trace.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770_dma.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/si_dpm.h
drivers/gpu/drm/radeon/si_smc.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/sislands_smc.h
drivers/gpu/drm/radeon/smu7_discrete.h
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/Makefile
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.h
drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/sti/sti_drm_crtc.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/dsi.h
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/gem.h
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/udl/Makefile
drivers/gpu/drm/udl/udl_dmabuf.c [new file with mode: 0644]
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/cdma.h
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/job.h
drivers/gpu/host1x/mipi.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/g762.c
drivers/hwmon/ibmpowernv.c
drivers/hwmon/pwm-fan.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/algos/i2c-algo-pcf.c
drivers/i2c/algos/i2c-algo-pcf.h
drivers/i2c/busses/i2c-ali1535.c
drivers/i2c/busses/i2c-ali15x3.c
drivers/i2c/busses/i2c-amd756-s4882.c
drivers/i2c/busses/i2c-amd756.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-au1550.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-elektor.c
drivers/i2c/busses/i2c-hydra.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-iop3xx.h
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-nforce2-s4985.c
drivers/i2c/busses/i2c-nforce2.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-parport-light.c
drivers/i2c/busses/i2c-parport.c
drivers/i2c/busses/i2c-parport.h
drivers/i2c/busses/i2c-pasemi.c
drivers/i2c/busses/i2c-pca-isa.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-powermac.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-sibyte.c
drivers/i2c/busses/i2c-simtec.c
drivers/i2c/busses/i2c-sis5595.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-sis96x.c
drivers/i2c/busses/i2c-taos-evm.c
drivers/i2c/busses/i2c-via.c
drivers/i2c/busses/i2c-viapro.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/busses/scx200_acb.c
drivers/i2c/i2c-boardinfo.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-core.h
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-smbus.c
drivers/i2c/i2c-stub.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/gyro/bmg160.c
drivers/iio/light/tsl4531.c
drivers/iio/proximity/as3935.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/evdev.c
drivers/input/joystick/xpad.c
drivers/input/misc/twl4030-pwrbutton.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/iommu/amd_iommu_v2.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/md/dm-bufio.c
drivers/md/dm-raid.c
drivers/md/dm-stripe.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-frontends/ds3000.c
drivers/media/dvb-frontends/sp2.c
drivers/media/dvb-frontends/tc90522.c
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/pci/cx23885/cx23885-core.c
drivers/media/pci/solo6x10/solo6x10-core.c
drivers/media/platform/vivid/vivid-core.c
drivers/media/rc/imon.c
drivers/media/rc/ir-hix5hd2.c
drivers/media/rc/ir-rc5-decoder.c
drivers/media/rc/ir-rc6-decoder.c
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-main.c
drivers/media/usb/s2255/s2255drv.c
drivers/mfd/max77693.c
drivers/mfd/rtsx_pcr.c
drivers/mfd/stmpe.h
drivers/mfd/twl4030-power.c
drivers/mfd/viperboard.c
drivers/mmc/core/host.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/can/dev.c
drivers/net/can/m_can/Kconfig
drivers/net/can/m_can/m_can.c
drivers/net/can/rcar_can.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/Kconfig
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ieee802154/fakehard.c
drivers/net/macvtap.c
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pptp.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/brcm80211/brcmfmac/of.c
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/phy.c
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/address.c
drivers/of/base.c
drivers/of/dynamic.c
drivers/of/fdt.c
drivers/of/selftest.c
drivers/of/testcase-data/tests-phandle.dtsi
drivers/pci/access.c
drivers/pci/host/pci-tegra.c
drivers/pci/host/pci-xgene.c
drivers/pci/msi.c
drivers/pci/pci.h
drivers/pci/probe.c
drivers/phy/phy-omap-usb2.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/platform/x86/Kconfig
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/power/ab8500_fg.c
drivers/power/bq2415x_charger.c
drivers/power/charger-manager.c
drivers/power/power_supply_core.c
drivers/regulator/max1586.c
drivers/regulator/max77686.c
drivers/regulator/max77693.c
drivers/regulator/max77802.c
drivers/regulator/max8660.c
drivers/regulator/of_regulator.c
drivers/regulator/s2mpa01.c
drivers/s390/kvm/virtio_ccw.c
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_error.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/versatile/soc-realview.c
drivers/spi/spi-dw.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sirf.c
drivers/spi/spi.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/iio/meter/ade7758.h
drivers/staging/iio/meter/ade7758_core.c
drivers/staging/iio/meter/ade7758_ring.c
drivers/staging/imx-drm/TODO [deleted file]
drivers/staging/rtl8188eu/core/rtw_cmd.c
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/thermal/cpu_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_thermal_common.h
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
drivers/thermal/st/st_thermal.c
drivers/thermal/thermal_core.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/of_serial.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_io.c
drivers/tty/vt/consolemap.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/ep0.c
drivers/usb/host/Kconfig
drivers/usb/host/hwa-hc.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/opticon.c
drivers/usb/serial/ssu100.c
drivers/usb/storage/initializers.c
drivers/usb/storage/realtek_cr.c
drivers/usb/storage/transport.c
drivers/usb/storage/unusual_uas.h
drivers/vhost/scsi.c
drivers/watchdog/s3c2410_wdt.c
fs/Makefile
fs/aio.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/file-item.c
fs/btrfs/locking.c
fs/btrfs/locking.h
fs/btrfs/lzo.c
fs/btrfs/zlib.c
fs/ceph/caps.c
fs/dcache.c
fs/fat/namei_vfat.c
fs/isofs/inode.c
fs/jbd2/journal.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/rpc_pipefs.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/filelayout/filelayout.c
fs/nfs/inode.c
fs/nfs/netns.h
fs/nfs/nfs4proc.c
fs/nfs/write.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfsd.h
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/inode_mark.c
fs/notify/mark.c
fs/notify/vfsmount_mark.c
fs/ocfs2/cluster/tcp.c
fs/overlayfs/Kconfig
fs/overlayfs/Makefile
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_itable.h
include/drm/drmP.h
include/drm/drm_atomic.h [new file with mode: 0644]
include/drm/drm_atomic_helper.h [new file with mode: 0644]
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_edid.h
include/drm/drm_flip_work.h
include/drm/drm_gem.h
include/drm/drm_gem_cma_helper.h
include/drm/drm_mipi_dsi.h
include/drm/drm_modeset_lock.h
include/drm/drm_plane_helper.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_execbuf_util.h
include/dt-bindings/clock/qcom,mmcc-apq8084.h
include/dt-bindings/clock/vf610-clock.h
include/dt-bindings/pinctrl/dra.h
include/linux/bitops.h
include/linux/bootmem.h
include/linux/can/dev.h
include/linux/clk-provider.h
include/linux/cma.h
include/linux/hdmi.h
include/linux/iio/events.h
include/linux/inetdevice.h
include/linux/kernel_stat.h
include/linux/kvm_host.h
include/linux/mfd/max77693-private.h
include/linux/mmu_notifier.h
include/linux/mmzone.h
include/linux/nfs_xdr.h
include/linux/of.h
include/linux/page-isolation.h
include/linux/pci-acpi.h
include/linux/pci.h
include/linux/percpu-refcount.h
include/linux/platform_data/rcar-du.h [deleted file]
include/linux/pm_domain.h
include/linux/power/charger-manager.h
include/linux/power_supply.h
include/linux/ring_buffer.h
include/linux/socket.h
include/net/9p/transport.h
include/net/inet_common.h
include/net/netfilter/nf_tables.h
include/net/udp_tunnel.h
include/net/vxlan.h
include/sound/pcm.h
include/sound/soc-dpcm.h
include/trace/events/host1x.h
include/uapi/drm/drm_mode.h
include/uapi/drm/i915_drm.h
include/uapi/linux/Kbuild
include/uapi/linux/if_bridge.h
include/uapi/linux/kfd_ioctl.h [new file with mode: 0644]
include/uapi/sound/asound.h
init/main.c
ipc/sem.c
kernel/audit.c
kernel/audit_tree.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/panic.c
kernel/power/suspend.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c
kernel/time/posix-cpu-timers.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
lib/Makefile
lib/genalloc.c
lib/rhashtable.c
lib/show_mem.c
mm/bootmem.c
mm/cma.c
mm/compaction.c
mm/fremap.c
mm/frontswap.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/iov_iter.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mmap.c
mm/mmu_notifier.c
mm/nobootmem.c
mm/page_alloc.c
mm/page_isolation.c
mm/rmap.c
mm/slab.c
mm/slab_common.c
mm/truncate.c
mm/vmpressure.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/netfilter/nft_reject_bridge.c
net/ceph/auth_x.c
net/ceph/crypto.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dcb/dcbnl.c
net/dsa/slave.c
net/ipv4/af_inet.c
net/ipv4/fib_rules.c
net/ipv4/fou.c
net/ipv4/geneve.c
net/ipv4/igmp.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/nft_masq_ipv4.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/netfilter/nft_masq_ipv6.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipx/af_ipx.c
net/mac80211/aes_ccm.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/spectmgmt.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_compat.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/sctp/auth.c
net/sctp/sm_make_chunk.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/svcsock.c
security/keys/internal.h
security/keys/keyctl.c
security/keys/keyring.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/selinux/hooks.c
sound/core/pcm.c
sound/core/pcm_misc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs42l51-i2c.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/cs42l51.h
sound/soc/codecs/es8328-i2c.c
sound/soc/codecs/max98090.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sgtl5000.h
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/fsl_asrc.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/samsung/snow.c
sound/soc/sh/fsi.c
sound/soc/sh/rcar/core.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/mixer_quirks.c
sound/usb/quirks.c
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/net/psock_fanout.c
virt/kvm/arm/vgic.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index bb6278884f894878dc49e1c6722a0cbe3e89e82e..c56d8aa10131d8443f7c5b717ae2b13b2a1d376b 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1197,6 +1197,13 @@ S: R. Tocantins, 89 - Cristo Rei
 S: 80050-430 - Curitiba - Paraná
 S: Brazil
 
+N: Oded Gabbay
+E: oded.gabbay@gmail.com
+D: AMD KFD maintainer
+S: 12 Shraga Raphaeli
+S: Petah-Tikva, 4906418
+S: Israel
+
 N: Kumar Gala
 E: galak@kernel.crashing.org
 D: Embedded PowerPC 6xx/7xx/74xx/82xx/83xx/85xx support
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index be35bc328b775948cd10d1c2cf3f8df76256aeb2..56e2a9b65c6857b1788592c985d7aa56180b15c4 100644 (file)
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -492,10 +492,10 @@ char *date;</synopsis>
     <sect2>
       <title>The Translation Table Manager (TTM)</title>
       <para>
-       TTM design background and information belongs here.
+        TTM design background and information belongs here.
       </para>
       <sect3>
-       <title>TTM initialization</title>
+        <title>TTM initialization</title>
         <warning><para>This section is outdated.</para></warning>
         <para>
           Drivers wishing to support TTM must fill out a drm_bo_driver
@@ -503,42 +503,42 @@ char *date;</synopsis>
           pointers for initializing the TTM, allocating and freeing memory,
           waiting for command completion and fence synchronization, and memory
           migration. See the radeon_ttm.c file for an example of usage.
-       </para>
-       <para>
-         The ttm_global_reference structure is made up of several fields:
-       </para>
-       <programlisting>
-         struct ttm_global_reference {
-               enum ttm_global_types global_type;
-               size_t size;
-               void *object;
-               int (*init) (struct ttm_global_reference *);
-               void (*release) (struct ttm_global_reference *);
-         };
-       </programlisting>
-       <para>
-         There should be one global reference structure for your memory
-         manager as a whole, and there will be others for each object
-         created by the memory manager at runtime.  Your global TTM should
-         have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
-         object should be sizeof(struct ttm_mem_global), and the init and
-         release hooks should point at your driver-specific init and
-         release routines, which probably eventually call
-         ttm_mem_global_init and ttm_mem_global_release, respectively.
-       </para>
-       <para>
-         Once your global TTM accounting structure is set up and initialized
-         by calling ttm_global_item_ref() on it,
-         you need to create a buffer object TTM to
-         provide a pool for buffer object allocation by clients and the
-         kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
-         and its size should be sizeof(struct ttm_bo_global).  Again,
-         driver-specific init and release functions may be provided,
-         likely eventually calling ttm_bo_global_init() and
-         ttm_bo_global_release(), respectively.  Also, like the previous
-         object, ttm_global_item_ref() is used to create an initial reference
-         count for the TTM, which will call your initialization function.
-       </para>
+        </para>
+        <para>
+          The ttm_global_reference structure is made up of several fields:
+        </para>
+        <programlisting>
+          struct ttm_global_reference {
+                  enum ttm_global_types global_type;
+                  size_t size;
+                  void *object;
+                  int (*init) (struct ttm_global_reference *);
+                  void (*release) (struct ttm_global_reference *);
+          };
+        </programlisting>
+        <para>
+          There should be one global reference structure for your memory
+          manager as a whole, and there will be others for each object
+          created by the memory manager at runtime.  Your global TTM should
+          have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
+          object should be sizeof(struct ttm_mem_global), and the init and
+          release hooks should point at your driver-specific init and
+          release routines, which probably eventually call
+          ttm_mem_global_init and ttm_mem_global_release, respectively.
+        </para>
+        <para>
+          Once your global TTM accounting structure is set up and initialized
+          by calling ttm_global_item_ref() on it,
+          you need to create a buffer object TTM to
+          provide a pool for buffer object allocation by clients and the
+          kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
+          and its size should be sizeof(struct ttm_bo_global).  Again,
+          driver-specific init and release functions may be provided,
+          likely eventually calling ttm_bo_global_init() and
+          ttm_bo_global_release(), respectively.  Also, like the previous
+          object, ttm_global_item_ref() is used to create an initial reference
+          count for the TTM, which will call your initialization function.
+        </para>
       </sect3>
     </sect2>
     <sect2 id="drm-gem">
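
Concretely, the initialization flow the reindented text above describes -- one
ttm_global_reference of type TTM_GLOBAL_TTM_MEM, driver-supplied init/release
hooks, then a call to ttm_global_item_ref() -- comes down to a pattern along
these lines. This is a minimal sketch using the section's own (self-admittedly
outdated) names; the my_* identifiers are hypothetical driver code, and later
kernels carry the same pattern as drm_global_reference / drm_global_item_ref():

    /* Hypothetical driver-side global TTM accounting setup. */
    static struct ttm_global_reference mem_global_ref;

    static int my_mem_global_init(struct ttm_global_reference *ref)
    {
            /* ref->object is the ttm_mem_global allocated for us */
            return ttm_mem_global_init(ref->object);
    }

    static void my_mem_global_release(struct ttm_global_reference *ref)
    {
            ttm_mem_global_release(ref->object);
    }

    static int my_ttm_global_init(void)
    {
            mem_global_ref.global_type = TTM_GLOBAL_TTM_MEM;
            mem_global_ref.size = sizeof(struct ttm_mem_global);
            mem_global_ref.init = &my_mem_global_init;
            mem_global_ref.release = &my_mem_global_release;

            /* Takes the initial reference and invokes ->init. */
            return ttm_global_item_ref(&mem_global_ref);
    }

The TTM_GLOBAL_TTM_BO object described above is set up the same way, with
sizeof(struct ttm_bo_global) and hooks that end up in ttm_bo_global_init()
and ttm_bo_global_release().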
@@ -566,19 +566,19 @@ char *date;</synopsis>
         using driver-specific ioctls.
       </para>
       <para>
-       On a fundamental level, GEM involves several operations:
-       <itemizedlist>
-         <listitem>Memory allocation and freeing</listitem>
-         <listitem>Command execution</listitem>
-         <listitem>Aperture management at command execution time</listitem>
-       </itemizedlist>
-       Buffer object allocation is relatively straightforward and largely
+        On a fundamental level, GEM involves several operations:
+        <itemizedlist>
+          <listitem>Memory allocation and freeing</listitem>
+          <listitem>Command execution</listitem>
+          <listitem>Aperture management at command execution time</listitem>
+        </itemizedlist>
+        Buffer object allocation is relatively straightforward and largely
         provided by Linux's shmem layer, which provides memory to back each
         object.
       </para>
       <para>
         Device-specific operations, such as command execution, pinning, buffer
-       read &amp; write, mapping, and domain ownership transfers are left to
+        read &amp; write, mapping, and domain ownership transfers are left to
         driver-specific ioctls.
       </para>
       <sect3>
@@ -738,16 +738,16 @@ char *date;</synopsis>
           respectively. The conversion is handled by the DRM core without any
           driver-specific support.
         </para>
-       <para>
-         GEM also supports buffer sharing with dma-buf file descriptors through
-         PRIME. GEM-based drivers must use the provided helpers functions to
-         implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
-         Since sharing file descriptors is inherently more secure than the
-         easily guessable and global GEM names it is the preferred buffer
-         sharing mechanism. Sharing buffers through GEM names is only supported
-         for legacy userspace. Furthermore PRIME also allows cross-device
-         buffer sharing since it is based on dma-bufs.
-       </para>
+        <para>
+          GEM also supports buffer sharing with dma-buf file descriptors through
+          PRIME. GEM-based drivers must use the provided helper functions to
+          implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
+          Since sharing file descriptors is inherently more secure than the
+          easily guessable and global GEM names, it is the preferred buffer
+          sharing mechanism. Sharing buffers through GEM names is only supported
+          for legacy userspace. Furthermore, PRIME also allows cross-device
+          buffer sharing since it is based on dma-bufs.
+        </para>
       </sect3>
       <sect3 id="drm-gem-objects-mapping">
         <title>GEM Objects Mapping</title>
@@ -852,7 +852,7 @@ char *date;</synopsis>
       <sect3>
         <title>Command Execution</title>
         <para>
-         Perhaps the most important GEM function for GPU devices is providing a
+          Perhaps the most important GEM function for GPU devices is providing a
           command execution interface to clients. Client programs construct
           command buffers containing references to previously allocated memory
           objects, and then submit them to GEM. At that point, GEM takes care to
@@ -874,95 +874,101 @@ char *date;</synopsis>
         <title>GEM Function Reference</title>
 !Edrivers/gpu/drm/drm_gem.c
       </sect3>
-      </sect2>
-      <sect2>
-       <title>VMA Offset Manager</title>
+    </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
 !Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
 !Edrivers/gpu/drm/drm_vma_manager.c
 !Iinclude/drm/drm_vma_manager.h
-      </sect2>
-      <sect2 id="drm-prime-support">
-       <title>PRIME Buffer Sharing</title>
-       <para>
-         PRIME is the cross device buffer sharing framework in drm, originally
-         created for the OPTIMUS range of multi-gpu platforms. To userspace
-         PRIME buffers are dma-buf based file descriptors.
-       </para>
-       <sect3>
-         <title>Overview and Driver Interface</title>
-         <para>
-           Similar to GEM global names, PRIME file descriptors are
-           also used to share buffer objects across processes. They offer
-           additional security: as file descriptors must be explicitly sent over
-           UNIX domain sockets to be shared between applications, they can't be
-           guessed like the globally unique GEM names.
-         </para>
-         <para>
-           Drivers that support the PRIME
-           API must set the DRIVER_PRIME bit in the struct
-           <structname>drm_driver</structname>
-           <structfield>driver_features</structfield> field, and implement the
-           <methodname>prime_handle_to_fd</methodname> and
-           <methodname>prime_fd_to_handle</methodname> operations.
-         </para>
-         <para>
-           <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
-                         struct drm_file *file_priv, uint32_t handle,
-                         uint32_t flags, int *prime_fd);
+    </sect2>
+    <sect2 id="drm-prime-support">
+      <title>PRIME Buffer Sharing</title>
+      <para>
+        PRIME is the cross-device buffer sharing framework in DRM, originally
+        created for the Optimus range of multi-GPU platforms. To userspace,
+        PRIME buffers are dma-buf based file descriptors.
+      </para>
+      <sect3>
+        <title>Overview and Driver Interface</title>
+        <para>
+          Similar to GEM global names, PRIME file descriptors are
+          also used to share buffer objects across processes. They offer
+          additional security: as file descriptors must be explicitly sent over
+          UNIX domain sockets to be shared between applications, they can't be
+          guessed like the globally unique GEM names.
+        </para>
+        <para>
+          Drivers that support the PRIME
+          API must set the DRIVER_PRIME bit in the struct
+          <structname>drm_driver</structname>
+          <structfield>driver_features</structfield> field, and implement the
+          <methodname>prime_handle_to_fd</methodname> and
+          <methodname>prime_fd_to_handle</methodname> operations.
+        </para>
+        <para>
+          <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+                          struct drm_file *file_priv, uint32_t handle,
+                          uint32_t flags, int *prime_fd);
 int (*prime_fd_to_handle)(struct drm_device *dev,
-                         struct drm_file *file_priv, int prime_fd,
-                         uint32_t *handle);</synopsis>
-           Those two operations convert a handle to a PRIME file descriptor and
-           vice versa. Drivers must use the kernel dma-buf buffer sharing framework
-           to manage the PRIME file descriptors. Similar to the mode setting
-           API PRIME is agnostic to the underlying buffer object manager, as
-           long as handles are 32bit unsigned integers.
-         </para>
-         <para>
-           While non-GEM drivers must implement the operations themselves, GEM
-           drivers must use the <function>drm_gem_prime_handle_to_fd</function>
-           and <function>drm_gem_prime_fd_to_handle</function> helper functions.
-           Those helpers rely on the driver
-           <methodname>gem_prime_export</methodname> and
-           <methodname>gem_prime_import</methodname> operations to create a dma-buf
-           instance from a GEM object (dma-buf exporter role) and to create a GEM
-           object from a dma-buf instance (dma-buf importer role).
-         </para>
-         <para>
-           <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
-                                    struct drm_gem_object *obj,
-                                    int flags);
+                          struct drm_file *file_priv, int prime_fd,
+                          uint32_t *handle);</synopsis>
+            Those two operations convert a handle to a PRIME file descriptor and
+            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+            to manage the PRIME file descriptors. Similar to the mode setting
+            API, PRIME is agnostic to the underlying buffer object manager, as
+            long as handles are 32-bit unsigned integers.
+          </para>
+          <para>
+            While non-GEM drivers must implement the operations themselves, GEM
+            drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+            and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+            Those helpers rely on the driver
+            <methodname>gem_prime_export</methodname> and
+            <methodname>gem_prime_import</methodname> operations to create a dma-buf
+            instance from a GEM object (dma-buf exporter role) and to create a GEM
+            object from a dma-buf instance (dma-buf importer role).
+          </para>
+          <para>
+            <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+                             struct drm_gem_object *obj,
+                             int flags);
 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
-                                           struct dma_buf *dma_buf);</synopsis>
-           These two operations are mandatory for GEM drivers that support
-           PRIME.
-         </para>
-       </sect3>
-        <sect3>
-          <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+                                            struct dma_buf *dma_buf);</synopsis>
+            These two operations are mandatory for GEM drivers that support
+            PRIME.
+          </para>
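+          <para>
+            As an illustration (mydrv_driver is a made-up name), a GEM driver
+            advertising PRIME support and relying on the default helpers
+            might declare:
+          </para>
+          <programlisting>
+            static struct drm_driver mydrv_driver = {
+                    .driver_features = DRIVER_GEM | DRIVER_PRIME,
+                    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+                    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+                    .gem_prime_export = drm_gem_prime_export,
+                    .gem_prime_import = drm_gem_prime_import,
+                    /* ... other operations ... */
+            };
+          </programlisting>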
         </sect3>
-      </sect2>
-      <sect2>
-       <title>PRIME Function References</title>
+      <sect3>
+        <title>PRIME Helper Functions</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>PRIME Function References</title>
 !Edrivers/gpu/drm/drm_prime.c
-      </sect2>
-      <sect2>
-       <title>DRM MM Range Allocator</title>
-       <sect3>
-         <title>Overview</title>
+    </sect2>
+    <sect2>
+      <title>DRM MM Range Allocator</title>
+      <sect3>
+        <title>Overview</title>
 !Pdrivers/gpu/drm/drm_mm.c Overview
-       </sect3>
-       <sect3>
-         <title>LRU Scan/Eviction Support</title>
+      </sect3>
+      <sect3>
+        <title>LRU Scan/Eviction Support</title>
 !Pdrivers/gpu/drm/drm_mm.c lru scan roaster
-       </sect3>
+      </sect3>
-      </sect2>
+    </sect2>
-      <sect2>
-       <title>DRM MM Range Allocator Function References</title>
+    <sect2>
+      <title>DRM MM Range Allocator Function References</title>
 !Edrivers/gpu/drm/drm_mm.c
 !Iinclude/drm/drm_mm.h
-      </sect2>
+    </sect2>
+    <sect2>
+      <title>CMA Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
+!Edrivers/gpu/drm/drm_gem_cma_helper.c
+!Iinclude/drm/drm_gem_cma_helper.h
+    </sect2>
   </sect1>
 
   <!-- Internals: mode setting -->
@@ -994,6 +1000,10 @@ int max_width, max_height;</synopsis>
       <title>Display Modes Function Reference</title>
 !Iinclude/drm/drm_modes.h
 !Edrivers/gpu/drm/drm_modes.c
+    </sect2>
+    <sect2>
+      <title>Atomic Mode Setting Function Reference</title>
+!Edrivers/gpu/drm/drm_atomic.c
     </sect2>
     <sect2>
       <title>Frame Buffer Creation</title>
@@ -1825,6 +1835,10 @@ void intel_crt_init(struct drm_device *dev)
     <sect2>
       <title>KMS API Functions</title>
 !Edrivers/gpu/drm/drm_crtc.c
+    </sect2>
+    <sect2>
+      <title>KMS Data Structures</title>
+!Iinclude/drm/drm_crtc.h
     </sect2>
     <sect2>
       <title>KMS Locking</title>
@@ -2315,9 +2329,27 @@ void intel_crt_init(struct drm_device *dev)
         </listitem>
       </itemizedlist>
     </sect2>
+    <sect2>
+      <title>Atomic Modeset Helper Functions Reference</title>
+      <sect3>
+       <title>Overview</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c overview
+      </sect3>
+      <sect3>
+       <title>Implementing Asynchronous Atomic Commit</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+      </sect3>
+      <sect3>
+       <title>Atomic State Reset and Initialization</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
+      </sect3>
+!Iinclude/drm/drm_atomic_helper.h
+!Edrivers/gpu/drm/drm_atomic_helper.c
+    </sect2>
     <sect2>
       <title>Modeset Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_crtc_helper.c
+!Pdrivers/gpu/drm/drm_crtc_helper.c overview
     </sect2>
     <sect2>
       <title>Output Probing Helper Functions Reference</title>
@@ -2341,6 +2373,12 @@ void intel_crt_init(struct drm_device *dev)
 !Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
 !Iinclude/drm/drm_dp_mst_helper.h
 !Edrivers/gpu/drm/drm_dp_mst_topology.c
+    </sect2>
+    <sect2>
+      <title>MIPI DSI Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
+!Iinclude/drm/drm_mipi_dsi.h
+!Edrivers/gpu/drm/drm_mipi_dsi.c
     </sect2>
     <sect2>
       <title>EDID Helper Functions Reference</title>
@@ -2371,7 +2409,8 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
     <sect2>
       <title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c Plane Helpers
+!Edrivers/gpu/drm/drm_plane_helper.c
+!Pdrivers/gpu/drm/drm_plane_helper.c overview
     </sect2>
   </sect1>
 
@@ -2507,8 +2546,8 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >Description/Restrictions</td>
        </tr>
        <tr>
-       <td rowspan="21" valign="top" >DRM</td>
-       <td rowspan="2" valign="top" >Generic</td>
+       <td rowspan="23" valign="top" >DRM</td>
+       <td rowspan="3" valign="top" >Generic</td>
        <td valign="top" >“EDID”</td>
        <td valign="top" >BLOB | IMMUTABLE</td>
        <td valign="top" >0</td>
@@ -2523,6 +2562,13 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >Contains DPMS operation mode value.</td>
        </tr>
        <tr>
+       <td valign="top" >“PATH”</td>
+       <td valign="top" >BLOB | IMMUTABLE</td>
+       <td valign="top" >0</td>
+       <td valign="top" >Connector</td>
+       <td valign="top" >Contains topology path to a connector.</td>
+       </tr>
+       <tr>
        <td rowspan="1" valign="top" >Plane</td>
        <td valign="top" >“type”</td>
        <td valign="top" >ENUM | IMMUTABLE</td>
@@ -2638,6 +2684,21 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >TBD</td>
        </tr>
        <tr>
+       <td rowspan="2" valign="top" >Virtual GPU</td>
+       <td valign="top" >“suggested X”</td>
+       <td valign="top" >RANGE</td>
+       <td valign="top" >Min=0, Max=0xffffffff</td>
+       <td valign="top" >Connector</td>
+	<td valign="top" >Property to suggest an X offset for a connector</td>
+       </tr>
+       <tr>
+       <td valign="top" >“suggested Y”</td>
+       <td valign="top" >RANGE</td>
+       <td valign="top" >Min=0, Max=0xffffffff</td>
+       <td valign="top" >Connector</td>
+	<td valign="top" >Property to suggest a Y offset for a connector</td>
+       </tr>
+       <tr>
        <td rowspan="3" valign="top" >Optional</td>
        <td valign="top" >“scaling mode”</td>
        <td valign="top" >ENUM</td>
@@ -3787,6 +3848,26 @@ int num_ioctls;</synopsis>
       blocks. This excludes a set of SoC platforms with an SGX rendering unit,
       those have basic support through the gma500 drm driver.
     </para>
+    <sect1>
+      <title>Core Driver Infrastructure</title>
+      <para>
+       This section covers core driver infrastructure used by both the display
+       and the GEM parts of the driver.
+      </para>
+      <sect2>
+        <title>Runtime Power Management</title>
+!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
+!Idrivers/gpu/drm/i915/intel_runtime_pm.c
+      </sect2>
+      <sect2>
+        <title>Interrupt Handling</title>
+!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
+      </sect2>
+    </sect1>
     <sect1>
       <title>Display Hardware Handling</title>
       <para>
@@ -3803,6 +3884,18 @@ int num_ioctls;</synopsis>
           configuration change.
         </para>
       </sect2>
+      <sect2>
+        <title>Frontbuffer Tracking</title>
+!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
+!Idrivers/gpu/drm/i915/intel_frontbuffer.c
+!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
+!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
+      </sect2>
+      <sect2>
+        <title>Display FIFO Underrun Reporting</title>
+!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
+!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
+      </sect2>
       <sect2>
         <title>Plane Configuration</title>
         <para>
@@ -3822,6 +3915,16 @@ int num_ioctls;</synopsis>
          probing, so those sections fully apply.
         </para>
       </sect2>
+      <sect2>
+       <title>High Definition Audio</title>
+!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
+!Idrivers/gpu/drm/i915/intel_audio.c
+      </sect2>
+      <sect2>
+        <title>Panel Self Refresh (PSR/SRD)</title>
+!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
+!Idrivers/gpu/drm/i915/intel_psr.c
+      </sect2>
       <sect2>
         <title>DPIO</title>
 !Pdrivers/gpu/drm/i915/i915_reg.h DPIO
@@ -3931,6 +4034,28 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/intel_lrc.c
       </sect2>
     </sect1>
+
+    <sect1>
+      <title>Tracing</title>
+      <para>
+        This section covers all things related to the tracepoints implemented
+        in the i915 driver.
+      </para>
+      <sect2>
+        <title>i915_ppgtt_create and i915_ppgtt_release</title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
+      </sect2>
+      <sect2>
+        <title>i915_context_create and i915_context_free</title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
+      </sect2>
+      <sect2>
+        <title>switch_mm</title>
+!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
+      </sect2>
+    </sect1>
+
   </chapter>
+!Cdrivers/gpu/drm/i915/i915_irq.c
 </part>
 </book>
index 1e6111333fa88f86a2008325637195f51d50648d..80ae87a0784bdba8f2014d51ec6b083512c1dc4f 100644 (file)
@@ -3,8 +3,10 @@
 Required properties:
 - compatible           : should contain one of the following:
                          - "renesas,sata-r8a7779" for R-Car H1
-                         - "renesas,sata-r8a7790" for R-Car H2
-                         - "renesas,sata-r8a7791" for R-Car M2
+                         - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
+                         - "renesas,sata-r8a7790" for R-Car H2 other than ES1
+                         - "renesas,sata-r8a7791" for R-Car M2-W
+                         - "renesas,sata-r8a7793" for R-Car M2-N
 - reg                  : address and length of the SATA registers;
 - interrupts           : must consist of one interrupt specifier.
 
index b48f4ef31d937ff8c4944e379d55357045653f02..4c32ef0b7db8fd635285b63722771a1a5ee9f848 100644 (file)
@@ -191,6 +191,8 @@ of the following host1x client modules:
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
   - nvidia,panel: phandle of a display panel
+  - nvidia,ganged-mode: contains a phandle to a second DSI controller to gang
+    up with in order to support up to 8 data lanes
 
 - sor: serial output resource
 
index ce6a1a0720285bd9be4549d478ea49b4b985ee31..8a3c4082989906248fd4f07ee7a1493b20580794 100644 (file)
@@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents.
   Example:
        interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
 
-A device node may contain either "interrupts" or "interrupts-extended", but not
-both. If both properties are present, then the operating system should log an
-error and use only the data in "interrupts".
-
 2) Interrupt controller nodes
 -----------------------------
 
diff --git a/Documentation/devicetree/bindings/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
new file mode 100644 (file)
index 0000000..690d0a5
--- /dev/null
@@ -0,0 +1,7 @@
+AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+
+Required properties:
+- compatible: should be "auo,b116xw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
new file mode 100644 (file)
index 0000000..7da1d5c
--- /dev/null
@@ -0,0 +1,7 @@
+HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "hannstar,hsd070pww1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
new file mode 100644 (file)
index 0000000..04caaae
--- /dev/null
@@ -0,0 +1,7 @@
+Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "hit,tx23d38vm0caa"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
new file mode 100644 (file)
index 0000000..2743b07
--- /dev/null
@@ -0,0 +1,7 @@
+Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g121i1-l01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
new file mode 100644 (file)
index 0000000..f522bb8
--- /dev/null
@@ -0,0 +1,49 @@
+Sharp Microelectronics 10.1" WQXGA TFT LCD panel
+
+This panel requires a dual-channel DSI host to operate. It supports two modes:
+- left-right: each channel drives the left or right half of the screen
+- even-odd: each channel drives the even or odd lines of the screen
+
+Each of the DSI channels controls a separate DSI peripheral. The peripheral
+driven by the first link (DSI-LINK1), left or even, is considered the primary
+peripheral and controls the device. The 'link2' property contains a phandle
+to the peripheral driven by the second link (DSI-LINK2, right or odd).
+
+Note that in video mode the DSI-LINK1 interface always provides the left/even
+pixels and DSI-LINK2 always provides the right/odd pixels. In command mode it
+is possible to program either link to drive the left/even or right/odd pixels
+but for the sake of consistency this binding assumes that the same assignment
+is chosen as for video mode.
+
+Required properties:
+- compatible: should be "sharp,lq101r1sx01"
+- reg: DSI virtual channel of the peripheral
+
+Required properties (for DSI-LINK1 only):
+- link2: phandle to the DSI peripheral on the secondary link. Note that the
+  presence of this property marks the containing node as DSI-LINK1.
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties (for DSI-LINK1 only):
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+       dsi@54300000 {
+               panel: panel@0 {
+                       compatible = "sharp,lq101r1sx01";
+                       reg = <0>;
+
+                       link2 = <&secondary>;
+
+                       power-supply = <...>;
+                       backlight = <...>;
+               };
+       };
+
+       dsi@54400000 {
+               secondary: panel@0 {
+                       compatible = "sharp,lq101r1sx01";
+                       reg = <0>;
+               };
+       };
index 41aeed38926d19e84b67f3194994ded9c584106c..f8fbe9af7b2f276350426b299c6030adb9464640 100644 (file)
@@ -7,3 +7,14 @@ And for the interrupt mapping part:
 
 Open Firmware Recommended Practice: Interrupt Mapping
 http://www.openfirmware.org/1275/practice/imap/imap0_9d.pdf
+
+In addition to the properties specified in the above standards, a host bridge
+driver implementation may support the following properties:
+
+- linux,pci-domain:
+   If present, this property assigns a fixed PCI domain number to a host
+   bridge; otherwise, an unstable (across boots) unique number will be
+   assigned.  Set this property either for all host bridges in the system
+   or for none of them; otherwise, potentially conflicting domain numbers
+   may be assigned to root buses behind different host bridges.  The domain
+   number for each host bridge in the system must be unique.
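+
+Example (illustrative only; "vendor,example-pcie" is a made-up compatible,
+shown solely to demonstrate the linux,pci-domain property):
+
+	pcie@40000000 {
+		compatible = "vendor,example-pcie";
+		device_type = "pci";
+		linux,pci-domain = <0>;
+	};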
index a186181c402ba693b43c71e5f129e6cb05cfcde7..51b943cc9770e1b096e3def62c3e902c4ce02772 100644 (file)
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090-PDC's pin configuration nodes act as a container for an abitrary number
+TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number
 of subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index 4b27c99f7f9d496028ae215b7c79d3cc4b1e995b..49d0e6050940256edde1dd8a2dcc2a84c6b5f910 100644 (file)
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090's pin configuration nodes act as a container for an abitrary number of
+TZ1090's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index daa7689560695d1769bdf7d2064ff1f0181873d6..ac4da9fe07bd1fe28d2ceabacbca5453ff41e7a5 100644 (file)
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
index b5469db1d7adc2d4f2ff36adf6ed75e2802fe116..e89b4677567d0fcd9a4550905ea806d905d951c0 100644 (file)
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
index 61e73cde9ae9437303b7b095dc2d14c1d4966f62..3c8ce28baad63b164513875221a298dca5bb2066 100644 (file)
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Tegra's pin configuration nodes act as a container for an abitrary number of
+Tegra's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index c596a6ad3285acdd52d357a6e4a56cee98a7eb56..5f55be59d914a33aa71f4f432237782fd45f46ad 100644 (file)
@@ -13,7 +13,7 @@ Optional properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the common
 pinctrl bindings used by client devices.
 
-SiRFprimaII's pinmux nodes act as a container for an abitrary number of subnodes.
+SiRFprimaII's pinmux nodes act as a container for an arbitrary number of subnodes.
 Each of these subnodes represents some desired configuration for a group of pins.
 
 Required subnode-properties:
index b4480d5c3aca93a99721829c604c5a823c28bc82..458615596946f6d29ccd60401f87a419fbe0e84a 100644 (file)
@@ -32,7 +32,7 @@ Required properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the common
 pinctrl bindings used by client devices.
 
-SPEAr's pinmux nodes act as a container for an abitrary number of subnodes. Each
+SPEAr's pinmux nodes act as a container for an arbitrary number of subnodes. Each
 of these subnodes represents muxing for a pin, a group, or a list of pins or
 groups.
 
index 2fb90b37aa09080fec5dc37caaafd72d73548607..a7bde64798c7e33a7a84e73bb92f82d5d7fd53eb 100644 (file)
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index ffafa1990a3048baaf36efce244e4c9ea4e7887f..c4ea61ac56f2fdbdd535a6123ba29c3a05acfb01 100644 (file)
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index e33e4dcdce79bdfb51e17b4ab1bbb5602db569b0..6e88e91feb1130796dacf610ceb0a6969c5b0b95 100644 (file)
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index 93b7de91b9f6f62d824436c003ee332705a1efdb..eb8d8aa41f2051a8be7c87e4d10c12b68f6a76d8 100644 (file)
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index d2ea80dc43ebca126558ed3f97d8a395f1b63c7c..e4d6a9d20f7d0d82c684356f03397f95872ffe26 100644 (file)
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
index 0ef00be44b0137e7435c13c57c379654937b4592..43404b197933262859e79686d5a8fa25ab12e5f3 100644 (file)
@@ -7,7 +7,10 @@ Required properties:
                            - "renesas,thermal-r8a73a4" (R-Mobile AP6)
                            - "renesas,thermal-r8a7779" (R-Car H1)
                            - "renesas,thermal-r8a7790" (R-Car H2)
-                           - "renesas,thermal-r8a7791" (R-Car M2)
+                           - "renesas,thermal-r8a7791" (R-Car M2-W)
+                           - "renesas,thermal-r8a7792" (R-Car V2H)
+                           - "renesas,thermal-r8a7793" (R-Car M2-N)
+                           - "renesas,thermal-r8a7794" (R-Car E2)
 - reg                  : Address range of the thermal registers.
                          The 1st reg will be recognized as common register
                          if it has "interrupts".
index 723999d737445274728bedde163d3534ca62339c..78efebbf278808b88f23f06c0be73bee1e00ee55 100644 (file)
@@ -34,6 +34,7 @@ chipidea      Chipidea, Inc
 chrp   Common Hardware Reference Platform
 chunghwa       Chunghwa Picture Tubes Ltd.
 cirrus Cirrus Logic, Inc.
+cnm    Chips&Media, Inc.
 cortina        Cortina Systems, Inc.
 crystalfontz   Crystalfontz America, Inc.
 dallas Maxim Integrated Products (formerly Dallas Semiconductor)
@@ -64,8 +65,10 @@ gmt  Global Mixed-mode Technology, Inc.
 google Google, Inc.
 gumstix        Gumstix, Inc.
 gw     Gateworks Corporation
+hannstar       HannStar Display Corporation
 haoyu  Haoyu Microelectronic Co. Ltd.
 hisilicon      Hisilicon Limited.
+hit    Hitachi Ltd.
 honeywell      Honeywell
 hp     Hewlett Packard
 i2se   I2SE GmbH
@@ -92,6 +95,7 @@ maxim Maxim Integrated Products
 mediatek       MediaTek Inc.
 micrel Micrel Inc.
 microchip      Microchip Technology Inc.
+micron Micron Technology Inc.
 mitsubishi     Mitsubishi Electric Corporation
 mosaixtech     Mosaix Technologies, Inc.
 moxa   Moxa
@@ -127,6 +131,7 @@ renesas     Renesas Electronics Corporation
 ricoh  Ricoh Co. Ltd.
 rockchip       Fuzhou Rockchip Electronics Co., Ltd
 samsung        Samsung Semiconductor
+sandisk        Sandisk Corporation
 sbs    Smart Battery System
 schindler      Schindler
 seagate        Seagate Technology PLC
@@ -138,7 +143,7 @@ silergy     Silergy Corp.
 sirf   SiRF Technology, Inc.
 sitronix       Sitronix Technology Corporation
 smsc   Standard Microsystems Corporation
-snps   Synopsys, Inc.
+snps   Synopsys, Inc.
 solidrun       SolidRun
 sony   Sony Corporation
 spansion       Spansion Inc.
diff --git a/Documentation/devicetree/bindings/video/adi,adv7511.txt b/Documentation/devicetree/bindings/video/adi,adv7511.txt
new file mode 100644 (file)
index 0000000..96c25ee
--- /dev/null
@@ -0,0 +1,88 @@
+Analog Devices ADV7511(W)/13 HDMI Encoders
+------------------------------------------
+
+The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
+S/PDIF, CEC and HDCP.
+
+Required properties:
+
+- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- reg: I2C slave address
+
+The ADV7511 supports a large number of input data formats that differ by their
+color depth, color format, clock mode, bit justification and random
+arrangement of components on the data bus. The combination of the following
+properties describes the input and maps directly to the video input tables of
+the ADV7511 datasheet that document all the supported combinations.
+
+- adi,input-depth: Number of bits per color component at the input (8, 10 or
+  12).
+- adi,input-colorspace: The input color space, one of "rgb", "yuv422" or
+  "yuv444".
+- adi,input-clock: The input clock type, one of "1x" (one clock cycle per
+  pixel), "2x" (two clock cycles per pixel), "ddr" (one clock cycle per pixel,
+  data driven on both edges).
+
+The following input format properties are required except in "rgb 1x" and
+"yuv444 1x" modes, in which case they must not be specified.
+
+- adi,input-style: The input components arrangement variant (1, 2 or 3), as
+  listed in the input format tables in the datasheet.
+- adi,input-justification: The input bit justification ("left", "evenly",
+  "right").
+
+Optional properties:
+
+- interrupts: Specifier for the ADV7511 interrupt
+- pd-gpios: Specifier for the GPIO connected to the power down signal
+
+- adi,clock-delay: Video data clock delay relative to the pixel clock, in ps
+  (-1200 ps .. 1600 ps). Defaults to no delay.
+- adi,embedded-sync: The input uses synchronization signals embedded in the
+  data stream (similar to BT.656). Defaults to separate H/V synchronization
+  signals.
+
+Required nodes:
+
+The ADV7511 has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for the RGB or YUV input
+- Video port 1 for the HDMI output
+
+
+Example
+-------
+
+       adv7511w: hdmi@39 {
+               compatible = "adi,adv7511w";
+		reg = <0x39>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511w_in: endpoint {
+                                       remote-endpoint = <&dpi_out>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_connector_in>;
+                               };
+                       };
+               };
+       };
index e74243b4b317ff74728c50b09e66b5f9347f6d69..ca2b4aacd9afb5e81d508df0371b4eb5b7f4f9b0 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
   - compatible: value should be one of the following
                "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+               "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
                "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
   - reg: physical base address and length of the registers set for the device
   - interrupts: should contain DSI interrupt
index 4e6c77c85546790bbb8b85c6528e31287ac9e81a..cf1af637102169a0c2b8059e4440132fce0d6bb2 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
                "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
                "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
+               "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
                "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
 
 - reg: physical base address and length of the FIMD registers set.
index 530850a72735ac6c47c46a51e71b84ca427cee5d..a27c950ece61b0d312fbba2a3757fab71c5b3616 100644 (file)
@@ -64,7 +64,7 @@ is formed.
 At mount time, the two directories given as mount options "lowerdir" and
 "upperdir" are combined into a merged directory:
 
-  mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\
+  mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\
 workdir=/work /merged
 
 The "workdir" needs to be an empty directory on the same filesystem
index e1ae127ed099d4934e1d7fb95f1a8ba819c1da9b..1ec0db7879d310dd3525eee439bc291a3e595c02 100644 (file)
@@ -38,22 +38,38 @@ Contents
         7.2.1 Status packet
         7.2.2 Head packet
         7.2.3 Motion packet
+ 8. Trackpoint (for Hardware version 3 and 4)
+    8.1 Registers
+    8.2 Native relative mode 6 byte packet format
+        8.2.1 Status Packet
 
 
 
 1. Introduction
    ~~~~~~~~~~~~
 
-Currently the Linux Elantech touchpad driver is aware of two different
-hardware versions unimaginatively called version 1 and version 2. Version 1
-is found in "older" laptops and uses 4 bytes per packet. Version 2 seems to
-be introduced with the EeePC and uses 6 bytes per packet, and provides
-additional features such as position of two fingers, and width of the touch.
+Currently the Linux Elantech touchpad driver is aware of four different
+hardware versions unimaginatively called version 1, version 2, version 3
+and version 4. Version 1 is found in "older" laptops and uses 4 bytes per
+packet. Version 2 seems to have been introduced with the EeePC and uses
+6 bytes per packet, and provides additional features such as the position
+of two fingers and the width of the touch.  Hardware version 3 uses 6 bytes
+per packet (and for 2 fingers the concatenation of two 6-byte packets) and
+allows tracking of up to 3 fingers. Hardware version 4 uses 6 bytes per
+packet, and can combine a status packet with multiple head or motion
+packets. Hardware version 4 allows tracking of up to 5 fingers.
+
+Some hardware version 3 and version 4 devices also have a trackpoint, which
+uses a separate packet format that is also 6 bytes per packet.
 
 The driver tries to support both hardware versions and should be compatible
 with the Xorg Synaptics touchpad driver and its graphical configuration
 utilities.
 
+Note that a mouse button is also associated with either the touchpad or the
+trackpoint when a trackpoint is available.  Disabling the touchpad in xorg
+(TouchPadOff=0) will also disable the buttons associated with the touchpad.
+
 Additionally the operation of the touchpad can be altered by adjusting the
 contents of some of its internal registers. These registers are represented
 by the driver as sysfs entries under /sys/bus/serio/drivers/psmouse/serio?
@@ -78,7 +94,7 @@ completeness sake.
 2. Extra knobs
    ~~~~~~~~~~~
 
-Currently the Linux Elantech touchpad driver provides two extra knobs under
+Currently the Linux Elantech touchpad driver provides three extra knobs under
 /sys/bus/serio/drivers/psmouse/serio? for the user.
 
 * debug
@@ -112,6 +128,20 @@ Currently the Linux Elantech touchpad driver provides two extra knobs under
    data consistency checking can be done. For now checking is disabled by
    default. Currently even turning it on will do nothing.
 
+* crc_enabled
+
+   Sets crc_enabled to 0/1. The name "crc_enabled" is the official name of
+   this integrity check, even though it is not an actual cyclic redundancy
+   check.
+
+   Depending on the state of crc_enabled, certain basic data integrity
+   verification is done by the driver on hardware version 3 and 4. The
+   driver will reject any packet that appears corrupted. The state of
+   crc_enabled can be altered with this knob.
+
+   Reading the crc_enabled value will show the active value. Echoing
+   "0" or "1" to this file will set the state to "0" or "1".
+
 /////////////////////////////////////////////////////////////////////////////
 
 3. Differentiating hardware versions
@@ -746,3 +776,42 @@ byte 5:
 
         byte 0 ~ 2 for one finger
         byte 3 ~ 5 for another
+
+
+8. Trackpoint (for Hardware version 3 and 4)
+   =========================================
+8.1 Registers
+    ~~~~~~~~~
+No special registers have been identified.
+
+8.2 Native relative mode 6 byte packet format
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+8.2.1 Status Packet
+      ~~~~~~~~~~~~~
+
+byte 0:
+   bit   7   6   5   4   3   2   1   0
+         0   0  sx  sy   0   M   R   L
+byte 1:
+   bit   7   6   5   4   3   2   1   0
+       ~sx   0   0   0   0   0   0   0
+byte 2:
+   bit   7   6   5   4   3   2   1   0
+       ~sy   0   0   0   0   0   0   0
+byte 3:
+   bit   7   6   5   4   3   2   1   0
+         0   0 ~sy ~sx   0   1   1   0
+byte 4:
+   bit   7   6   5   4   3   2   1   0
+        x7  x6  x5  x4  x3  x2  x1  x0
+byte 5:
+   bit   7   6   5   4   3   2   1   0
+        y7  y6  y5  y4  y3  y2  y1  y0
+
+
+         x and y are written in two's complement spread over 9 bits, with
+         sx/sy the respective top bit and x7..x0 and y7..y0 the lower bits.
+         ~sx is the inverse of sx, ~sy is the inverse of sy.
+         The sign of y is opposite to what the input driver expects for a
+         relative movement.
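+
+         As a decoding sketch (illustrative C, not code taken from the
+         driver), the deltas can be reconstructed from such a packet as:
+
+                 int dx, dy;
+
+                 /* bit 8 comes from sx/sy in byte 0, bits 7..0 from bytes 4/5 */
+                 dx = packet[4] - (int)((packet[0] & 0x20) << 3);
+                 /* the sign of y is flipped for the input core */
+                 dy = (int)((packet[0] & 0x10) << 4) - packet[5];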
index 4c81a860cc2bcd31d58e13e8b9539744a241adcc..479f33204a3727a51ece7ba2cc2b747e27788e36 100644 (file)
@@ -3621,7 +3621,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        usb-storage.delay_use=
                        [UMS] The delay in seconds before a new device is
-                       scanned for Logical Units (default 5).
+                       scanned for Logical Units (default 1).
 
        usb-storage.quirks=
                        [UMS] A list of quirks entries to supplement or
index 0307e2875f2159cb669b741f9d6a949618c3a055..a476b08a43e0dac5642a0bdfba76b8523be1033e 100644 (file)
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
        0 - disabled
        1 - enabled
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv4 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMP echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to.
+       Default: 0
+
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
 proxy_ndp - BOOLEAN
        Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv6 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to.
+       Default: 0
+
 conf/interface/*:
        Change special settings per interface.
 
index 412f45ca2d73e3cd31a487e5fe13b73fc552d9f5..1d6d02d6ba52b531642436e8a6d8c0640af5467d 100644 (file)
@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
 
   This option is implemented only for transmit timestamps. There, the
   timestamp is always looped along with a struct sock_extended_err.
-  The option modifies field ee_info to pass an id that is unique
+  The option modifies field ee_data to pass an id that is unique
   among all possibly concurrently outstanding timestamp requests for
   that socket. In practice, it is a monotonically increasing u32
   (that wraps).
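+
+  As an illustrative sketch (assuming cmsg was obtained from recvmsg() with
+  MSG_ERRQUEUE), the id can be read back from the error queue like this:
+
+    struct sock_extended_err *serr = (void *) CMSG_DATA(cmsg);
+
+    if (serr->ee_errno == ENOMSG &&
+        serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
+            printf("timestamp id: %u\n", serr->ee_data);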
index eeb11a28e4fcbc96c4c3456eb2fbde71506a59eb..e5a940e3d304dbccbc673bfa990f76919a56bb8a 100644 (file)
@@ -221,12 +221,11 @@ ccs_out_mode: specify the allowed video output crop/compose/scaling combination
                       key, not quality.
 
 multiplanar: select whether each device instance supports multi-planar formats,
-       and thus the V4L2 multi-planar API. By default the first device instance
-       is single-planar, the second multi-planar, and it keeps alternating.
+       and thus the V4L2 multi-planar API. By default device instances are
+       single-planar.
 
        This module option can override that for each instance. Values are:
 
-               0: use alternating single and multi-planar devices.
                1: this is a single-planar instance.
                2: this is a multi-planar instance.
 
@@ -975,9 +974,8 @@ is set, then the alpha component is only used for the color red and set to
 0 otherwise.
 
 The driver has to be configured to support the multiplanar formats. By default
-the first driver instance is single-planar, the second is multi-planar, and it
-keeps alternating. This can be changed by setting the multiplanar module option,
-see section 1 for more details on that option.
+the driver instances are single-planar. This can be changed by setting the
+multiplanar module option, see section 1 for more details on that option.
 
 If the driver instance is using the multiplanar formats/API, then the first
 single planar format (YUYV) and the multiplanar NV16M and NV61M formats the
@@ -1021,7 +1019,7 @@ the output overlay for the video output, turn on video looping and capture
 to see the blended framebuffer overlay that's being written to by the second
 instance. This setup would require the following commands:
 
-       $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1 multiplanar=1,1
+       $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1
        $ v4l2-ctl -d1 --find-fb
        /dev/fb1 is the framebuffer associated with base address 0x12800000
        $ sudo v4l2-ctl -d2 --set-fbuf fb=1
index 3c6427190be2a4e821eb24cd9e9b0ca25086776a..296c02d39c29a3fc48d1296c31f811e1c5311786 100644 (file)
@@ -618,6 +618,16 @@ S: Maintained
 F:     drivers/iommu/amd_iommu*.[ch]
 F:     include/linux/amd-iommu.h
 
+AMD KFD
+M:	Oded Gabbay <oded.gabbay@amd.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://people.freedesktop.org/~gabbayo/linux.git
+S:	Supported
+F:	drivers/gpu/drm/amd/amdkfd/
+F:	drivers/gpu/drm/radeon/radeon_kfd.c
+F:	drivers/gpu/drm/radeon/radeon_kfd.h
+F:	include/uapi/linux/kfd_ioctl.h
+
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
 L:     amd64-microcode@amd64.org
@@ -1543,6 +1553,7 @@ F:        arch/arm/mach-pxa/include/mach/z2.h
 
 ARM/ZYNQ ARCHITECTURE
 M:     Michal Simek <michal.simek@xilinx.com>
+R:     Sören Brinkmann <soren.brinkmann@xilinx.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://wiki.xilinx.com
 T:     git git://git.xilinx.com/linux-xlnx.git
@@ -1827,7 +1838,7 @@ F:        include/net/ax25.h
 F:     net/ax25/
 
 AZ6007 DVB DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2071,8 +2082,9 @@ F:        drivers/clocksource/bcm_kona_timer.c
 
 BROADCOM BCM2835 ARM ARCHITECTURE
 M:     Stephen Warren <swarren@wwwdotorg.org>
+M:     Lee Jones <lee@kernel.org>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
 S:     Maintained
 N:     bcm2835
 
@@ -2196,7 +2208,7 @@ F:        Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
 
 BTTV VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2717,7 +2729,7 @@ F:        drivers/media/common/cx2341x*
 F:     include/media/cx2341x*
 
 CX88 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2742,6 +2754,13 @@ W:       http://www.chelsio.com
 S:     Supported
 F:     drivers/net/ethernet/chelsio/cxgb3/
 
+CXGB3 ISCSI DRIVER (CXGB3I)
+M:	Karen Xie <kxie@chelsio.com>
+L:	linux-scsi@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/scsi/cxgbi/cxgb3i
+
 CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 M:     Steve Wise <swise@chelsio.com>
 L:     linux-rdma@vger.kernel.org
@@ -2756,6 +2775,13 @@ W:       http://www.chelsio.com
 S:     Supported
 F:     drivers/net/ethernet/chelsio/cxgb4/
 
+CXGB4 ISCSI DRIVER (CXGB4I)
+M:	Karen Xie <kxie@chelsio.com>
+L:	linux-scsi@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/scsi/cxgbi/cxgb4i
+
 CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
 M:     Steve Wise <swise@chelsio.com>
 L:     linux-rdma@vger.kernel.org
@@ -3190,6 +3216,13 @@ F:       drivers/gpu/drm/exynos/
 F:     include/drm/exynos*
 F:     include/uapi/drm/exynos*
 
+DRM DRIVERS FOR FREESCALE IMX
+M:     Philipp Zabel <p.zabel@pengutronix.de>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/imx/
+F:     Documentation/devicetree/bindings/drm/imx/
+
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
 M:     Terje Bergström <tbergstrom@nvidia.com>
@@ -3386,7 +3419,7 @@ F:        fs/ecryptfs/
 EDAC-CORE
 M:     Doug Thompson <dougthompson@xmission.com>
 M:     Borislav Petkov <bp@alien8.de>
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Supported
@@ -3435,7 +3468,7 @@ S:        Maintained
 F:     drivers/edac/e7xxx_edac.c
 
 EDAC-GHES
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3463,21 +3496,21 @@ S:      Maintained
 F:     drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3520,7 +3553,7 @@ S:        Maintained
 F:     drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
 S:     Maintained
@@ -3580,7 +3613,7 @@ S:        Maintained
 F:     drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -4714,6 +4747,7 @@ L:        linux-iio@vger.kernel.org
 S:     Maintained
 F:     drivers/iio/
 F:     drivers/staging/iio/
+F:     include/linux/iio/
 
 IKANOS/ADI EAGLE ADSL USB DRIVER
 M:     Matthieu Castet <castet.matthieu@free.fr>
@@ -5945,7 +5979,7 @@ S:        Maintained
 F:     drivers/media/radio/radio-maxiradio*
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 P:     LinuxTV.org Project
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
@@ -6594,6 +6628,23 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
 S:     Maintained
 F:     arch/arm/*omap*/
 F:     drivers/i2c/busses/i2c-omap.c
+F:     drivers/irqchip/irq-omap-intc.c
+F:     drivers/mfd/*omap*.c
+F:     drivers/mfd/menelaus.c
+F:     drivers/mfd/palmas.c
+F:     drivers/mfd/tps65217.c
+F:     drivers/mfd/tps65218.c
+F:     drivers/mfd/tps65910.c
+F:     drivers/mfd/twl-core.[ch]
+F:     drivers/mfd/twl4030*.c
+F:     drivers/mfd/twl6030*.c
+F:     drivers/mfd/twl6040*.c
+F:     drivers/regulator/palmas-regulator*.c
+F:     drivers/regulator/pbias-regulator.c
+F:     drivers/regulator/tps65217-regulator.c
+F:     drivers/regulator/tps65218-regulator.c
+F:     drivers/regulator/tps65910-regulator.c
+F:     drivers/regulator/twl-regulator.c
 F:     include/linux/i2c-omap.h
 
 OMAP DEVICE TREE SUPPORT
@@ -6604,6 +6655,9 @@ L:        devicetree@vger.kernel.org
 S:     Maintained
 F:     arch/arm/boot/dts/*omap*
 F:     arch/arm/boot/dts/*am3*
+F:     arch/arm/boot/dts/*am4*
+F:     arch/arm/boot/dts/*am5*
+F:     arch/arm/boot/dts/*dra7*
 
 OMAP CLOCK FRAMEWORK SUPPORT
 M:     Paul Walmsley <paul@pwsan.com>
@@ -6851,11 +6905,12 @@ F:      drivers/scsi/osd/
 F:     include/scsi/osd_*
 F:     fs/exofs/
 
-OVERLAYFS FILESYSTEM
+OVERLAY FILESYSTEM
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     linux-fsdevel@vger.kernel.org
+L:     linux-unionfs@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
 S:     Supported
-F:     fs/overlayfs/*
+F:     fs/overlayfs/
 F:     Documentation/filesystems/overlayfs.txt
 
 P54 WIRELESS DRIVER
@@ -7179,6 +7234,7 @@ F:        drivers/crypto/picoxcell*
 
 PIN CONTROL SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
+L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
@@ -7974,7 +8030,7 @@ S:        Odd Fixes
 F:     drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -8432,7 +8488,7 @@ S:        Maintained
 F:     drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -8483,7 +8539,6 @@ F:        arch/arm/mach-s3c24xx/bast-irq.c
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
 M:     Kevin Hilman <khilman@deeprootsystems.com>
-L:     davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:     git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:     http://patchwork.kernel.org/project/linux-davinci/list/
 S:     Supported
@@ -8493,7 +8548,6 @@ F:        drivers/i2c/busses/i2c-davinci.c
 TI DAVINCI SERIES MEDIA DRIVER
 M:     Lad, Prabhakar <prabhakar.csengg@gmail.com>
 L:     linux-media@vger.kernel.org
-L:     davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 W:     http://linuxtv.org/
 Q:     http://patchwork.linuxtv.org/project/linux-media/list/
 T:     git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
@@ -8645,7 +8699,9 @@ S:        Maintained
 F:     drivers/leds/leds-net48xx.c
 
 SOFTLOGIC 6x10 MPEG CODEC
-M:     Ismael Luceno <ismael.luceno@corp.bluecherry.net>
+M:     Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M:     Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M:     Andrey Utkin <andrey.krieger.utkin@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/pci/solo6x10/
@@ -9119,7 +9175,7 @@ S:        Maintained
 F:     drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -9127,7 +9183,7 @@ S:        Odd fixes
 F:     drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -9439,7 +9495,7 @@ F:        include/linux/shmem_fs.h
 F:     mm/shmem.c
 
 TM6000 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -9703,11 +9759,6 @@ S:       Maintained
 F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
-USB/IP DRIVERS
-L:     linux-usb@vger.kernel.org
-S:     Orphan
-F:     drivers/staging/usbip/
-
 USB ISP116X DRIVER
 M:     Olav Kongas <ok@artecdesign.ee>
 L:     linux-usb@vger.kernel.org
@@ -10265,7 +10316,7 @@ S:      Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
 
 XC2028/3028 TUNER DRIVER
-M:     Mauro Carvalho Chehab <m.chehab@samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 L:     linux-media@vger.kernel.org
 W:     http://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
index ffc1ce2b03452780dffef34fc42fb6f3b680df56..fd80c6e9bc2367f79f47edebb15649d23d341791 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -297,7 +297,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 
 HOSTCC       = gcc
 HOSTCXX      = g++
-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
 HOSTCXXFLAGS = -O2
 
 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
@@ -401,7 +401,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                   -fno-strict-aliasing -fno-common \
                   -Werror-implicit-function-declaration \
-                  -Wno-format-security
+                  -Wno-format-security \
+                  -std=gnu89
 
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
index 03dc4c1a8736e78e5878298b9ff0c797e67f57fb..d8f6a2ec3d4e065bb49de2ff75801269090805ea 100644 (file)
@@ -1187,7 +1187,7 @@ config DEBUG_UART_VIRT
        default 0xf1c28000 if DEBUG_SUNXI_UART0
        default 0xf1c28400 if DEBUG_SUNXI_UART1
        default 0xf1f02800 if DEBUG_SUNXI_R_UART
-       default 0xf2100000 if DEBUG_PXA_UART1
+       default 0xf6200000 if DEBUG_PXA_UART1
        default 0xf4090000 if ARCH_LPC32XX
        default 0xf4200000 if ARCH_GEMINI
        default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \
index 413fd94b53012dacc6596d2e9b225c04a7dab97a..68be9017593df10f235ca228a6837aaa11f5c818 100644 (file)
@@ -397,8 +397,7 @@ dtb_check_done:
                add     sp, sp, r6
 #endif
 
-               tst     r4, #1
-               bleq    cache_clean_flush
+               bl      cache_clean_flush
 
                adr     r0, BSYM(restart)
                add     r0, r0, r6
@@ -1047,6 +1046,8 @@ cache_clean_flush:
                b       call_cache_fn
 
 __armv4_mpu_cache_flush:
+               tst     r4, #1
+               movne   pc, lr
                mov     r2, #1
                mov     r3, #0
                mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
@@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush:
                mov     pc, lr
                
 __fa526_cache_flush:
+               tst     r4, #1
+               movne   pc, lr
                mov     r1, #0
                mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
                mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
@@ -1072,13 +1075,16 @@ __fa526_cache_flush:
 
 __armv6_mmu_cache_flush:
                mov     r1, #0
-               mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
+               tst     r4, #1
+               mcreq   p15, 0, r1, c7, c14, 0  @ clean+invalidate D
                mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
-               mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
+               mcreq   p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
                mcr     p15, 0, r1, c7, c10, 4  @ drain WB
                mov     pc, lr
 
 __armv7_mmu_cache_flush:
+               tst     r4, #1
+               bne     iflush
                mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
                tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
                mov     r10, #0
@@ -1139,6 +1145,8 @@ iflush:
                mov     pc, lr
 
 __armv5tej_mmu_cache_flush:
+               tst     r4, #1
+               movne   pc, lr
 1:             mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
                bne     1b
                mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
@@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush:
                mov     pc, lr
 
 __armv4_mmu_cache_flush:
+               tst     r4, #1
+               movne   pc, lr
                mov     r2, #64*1024            @ default: 32K dcache size (*2)
                mov     r11, #32                @ default: 32 byte line size
                mrc     p15, 0, r3, c0, c0, 1   @ read cache type
@@ -1179,6 +1189,8 @@ no_cache_id:
 
 __armv3_mmu_cache_flush:
 __armv3_mpu_cache_flush:
+               tst     r4, #1
+               movne   pc, lr
                mov     r1, #0
                mcr     p15, 0, r1, c7, c0, 0   @ invalidate whole cache v3
                mov     pc, lr
index e2156a583de76a2cd67dcbc06593fb192caaf7e4..c4b968f0feb53434b247c66e37ceac51a7f7b4aa 100644 (file)
                        reg = <0x00060000 0x00020000>;
                };
                partition@4 {
-                       label = "NAND.u-boot-spl";
+                       label = "NAND.u-boot-spl-os";
                        reg = <0x00080000 0x00040000>;
                };
                partition@5 {
index e7ac47fa6615e33ee52e3ef58025721f49c7eb08..a521ac0a7d5a2f53db4caaeea32b0a6432f4a5ad 100644 (file)
                dcdc3: regulator-dcdc3 {
                        compatible = "ti,tps65218-dcdc3";
                        regulator-name = "vdcdc3";
-                       regulator-min-microvolt = <1350000>;
-                       regulator-max-microvolt = <1350000>;
+                       regulator-min-microvolt = <1500000>;
+                       regulator-max-microvolt = <1500000>;
                        regulator-boot-on;
                        regulator-always-on;
                };
index 859ff3d620ee6734bcd0e802cf53fd8b94069547..87aa4f3b8b3d516807cb9446313283df2c0860b0 100644 (file)
                dcdc3: regulator-dcdc3 {
                        compatible = "ti,tps65218-dcdc3";
                        regulator-name = "vdds_ddr";
-                       regulator-min-microvolt = <1350000>;
-                       regulator-max-microvolt = <1350000>;
+                       regulator-min-microvolt = <1500000>;
+                       regulator-max-microvolt = <1500000>;
                        regulator-boot-on;
                        regulator-always-on;
                };
index ac3e4859935f1ad31da67107eda970eec6f526ba..f7e9bba10bd6db397342d2f1561da69bc8bc7808 100644 (file)
                dcdc3: regulator-dcdc3 {
                        compatible = "ti,tps65218-dcdc3";
                        regulator-name = "vdcdc3";
-                       regulator-min-microvolt = <1350000>;
-                       regulator-max-microvolt = <1350000>;
+                       regulator-min-microvolt = <1500000>;
+                       regulator-max-microvolt = <1500000>;
                        regulator-boot-on;
                        regulator-always-on;
                };
index e51fcef884a43d629ab12132e549cc8a0b731405..60429ad1c5d8451c1e481e0190ad5d312dc54522 100644 (file)
        num-cs = <1>;
 };
 
+&usbdrd_dwc3 {
+       dr_mode = "host";
+};
+
 #include "cros-ec-keyboard.dtsi"
index f21b9aa00fbb214f4dee8b0b1980884f5411c70f..d55c1a2eb798966340325afbb5c3009c8bcef28c 100644 (file)
                #size-cells = <1>;
                ranges;
 
-               dwc3 {
+               usbdrd_dwc3: dwc3 {
                        compatible = "synopsys,dwc3";
                        reg = <0x12000000 0x10000>;
                        interrupts = <0 72 0>;
index d46c213a17ad5de43972fd4f7b28beda61b53347..eed697a6bd6bb290ca16f2536cf04651d56c2d15 100644 (file)
                        clocks = <&cpg_clocks R8A7740_CLK_S>,
                                 <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>,
                                 <&cpg_clocks R8A7740_CLK_B>,
-                                <&sub_clk>, <&sub_clk>,
+                                <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>,
                                 <&cpg_clocks R8A7740_CLK_B>;
                        #clock-cells = <1>;
                        renesas,clock-indices = <
index c160404e4d405eb2acedc5affd96781ee8bca162..765978fe430bd32b185a7f727b4edf34cd322ba2 100644 (file)
                        gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;
                };
        };
+
+       vga-encoder {
+               compatible = "adi,adv7123";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               vga_enc_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb0>;
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               vga_enc_out: endpoint {
+                                       remote-endpoint = <&vga_in>;
+                               };
+                       };
+               };
+       };
+
+       vga {
+               compatible = "vga-connector";
+
+               port {
+                       vga_in: endpoint {
+                               remote-endpoint = <&vga_enc_out>;
+                       };
+               };
+       };
+
+       lvds-encoder {
+               compatible = "thine,thc63lvdm83d";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               lvds_enc_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb1>;
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               lvds_connector: endpoint {
+                               };
+                       };
+               };
+       };
+};
+
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&vga_enc_in>;
+                       };
+               };
+               port@1 {
+                       endpoint {
+                               remote-endpoint = <&lvds_enc_in>;
+                       };
+               };
+       };
 };
 
 &irqpin0 {
 };
 
 &pfc {
+       du_pins: du {
+               du0 {
+                       renesas,groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
+                       renesas,function = "du0";
+               };
+               du1 {
+                       renesas,groups = "du1_rgb666", "du1_sync_1", "du1_clk_out";
+                       renesas,function = "du1";
+               };
+       };
+
        lan0_pins: lan0 {
                intc {
                        renesas,groups = "intc_irq1_b";
index 7cfba9aa1b415cc180dae8f0a75901fb5921f488..fda814ed191d1531502480f308cc481e18ffad37 100644 (file)
                status = "disabled";
        };
 
+       du: display@fff80000 {
+               compatible = "renesas,du-r8a7779";
+               reg = <0 0xfff80000 0 0x40000>;
+               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7779_CLK_DU>;
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               du_out_rgb0: endpoint {
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               du_out_rgb1: endpoint {
+                               };
+                       };
+               };
+       };
+
        clocks {
                #address-cells = <1>;
                #size-cells = <1>;
index 69098b906b3919d2653dd244b6cc3220d6dfb741..5237d463ae1d59d2defb8a9460abb06b33905db0 100644 (file)
                states = <3300000 1
                          1800000 0>;
        };
+
+       vga-encoder {
+               compatible = "adi,adv7123";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7123_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb>;
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               adv7123_out: endpoint {
+                                       remote-endpoint = <&vga_in>;
+                               };
+                       };
+               };
+       };
+
+       vga {
+               compatible = "vga-connector";
+
+               port {
+                       vga_in: endpoint {
+                               remote-endpoint = <&adv7123_out>;
+                       };
+               };
+       };
+};
+
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&adv7123_in>;
+                       };
+               };
+               port@2 {
+                       lvds_connector: endpoint {
+                       };
+               };
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
-       pinctrl-0 = <&du_pins>;
-       pinctrl-names = "default";
-
        du_pins: du {
                renesas,groups = "du_rgb666", "du_sync_1", "du_clk_out_0";
                renesas,function = "du";
index d0e17733dc1a340608b10b3f4f595e93ec53d45d..0c20c90d8c0613f56207bcc421cf4c74c076c367 100644 (file)
                status = "disabled";
        };
 
+       vsp1@fe920000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe920000 0 0x8000>;
+               interrupts = <0 266 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
+
+               renesas,has-sru;
+               renesas,#rpf = <5>;
+               renesas,#uds = <1>;
+               renesas,#wpf = <4>;
+       };
+
+       vsp1@fe928000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe928000 0 0x8000>;
+               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
+
+               renesas,has-lut;
+               renesas,has-sru;
+               renesas,#rpf = <5>;
+               renesas,#uds = <3>;
+               renesas,#wpf = <4>;
+       };
+
+       vsp1@fe930000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe930000 0 0x8000>;
+               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
+
+               renesas,has-lif;
+               renesas,has-lut;
+               renesas,#rpf = <4>;
+               renesas,#uds = <1>;
+               renesas,#wpf = <4>;
+       };
+
+       vsp1@fe938000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe938000 0 0x8000>;
+               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
+
+               renesas,has-lif;
+               renesas,has-lut;
+               renesas,#rpf = <4>;
+               renesas,#uds = <1>;
+               renesas,#wpf = <4>;
+       };
+
+       du: display@feb00000 {
+               compatible = "renesas,du-r8a7790";
+               reg = <0 0xfeb00000 0 0x70000>,
+                     <0 0xfeb90000 0 0x1c>,
+                     <0 0xfeb94000 0 0x1c>;
+               reg-names = "du", "lvds.0", "lvds.1";
+               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 268 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 269 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_DU0>,
+                        <&mstp7_clks R8A7790_CLK_DU1>,
+                        <&mstp7_clks R8A7790_CLK_DU2>,
+                        <&mstp7_clks R8A7790_CLK_LVDS0>,
+                        <&mstp7_clks R8A7790_CLK_LVDS1>;
+               clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1";
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               du_out_rgb: endpoint {
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               du_out_lvds0: endpoint {
+                               };
+                       };
+                       port@2 {
+                               reg = <2>;
+                               du_out_lvds1: endpoint {
+                               };
+                       };
+               };
+       };
+
        clocks {
                #address-cells = <2>;
                #size-cells = <2>;
                        #clock-cells = <0>;
                        clock-output-names = "sd2";
                };
-               sd3_clk: sd3_clk@e615007c {
+               sd3_clk: sd3_clk@e615026c {
                        compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
-                       reg = <0 0xe615007c 0 4>;
+                       reg = <0 0xe615026c 0 4>;
                        clocks = <&pll1_div2_clk>;
                        #clock-cells = <0>;
                        clock-output-names = "sd3";
index 07550e775e8047111e2f280ef8d94da993970ebb..4f4e56e5de3de4a12228f07dd7d467a0a9603888 100644 (file)
        };
 };
 
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       ports {
+               port@1 {
+                       lvds_connector: endpoint {
+                       };
+               };
+       };
+};
+
 &extal_clk {
        clock-frequency = <20000000>;
 };
 
 &pfc {
-       pinctrl-0 = <&du_pins>;
-       pinctrl-names = "default";
-
        i2c2_pins: i2c2 {
                renesas,groups = "i2c2";
                renesas,function = "i2c2";
index e06c11fa8698cfcc4cd5fc13ce1f905f554de01f..e4a7170f368b6cd6e66c62219f1d675d99b0ac30 100644 (file)
                status = "disabled";
        };
 
+       vsp1@fe928000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe928000 0 0x8000>;
+               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
+
+               renesas,has-lut;
+               renesas,has-sru;
+               renesas,#rpf = <5>;
+               renesas,#uds = <3>;
+               renesas,#wpf = <4>;
+       };
+
+       vsp1@fe930000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe930000 0 0x8000>;
+               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
+
+               renesas,has-lif;
+               renesas,has-lut;
+               renesas,#rpf = <4>;
+               renesas,#uds = <1>;
+               renesas,#wpf = <4>;
+       };
+
+       vsp1@fe938000 {
+               compatible = "renesas,vsp1";
+               reg = <0 0xfe938000 0 0x8000>;
+               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
+
+               renesas,has-lif;
+               renesas,has-lut;
+               renesas,#rpf = <4>;
+               renesas,#uds = <1>;
+               renesas,#wpf = <4>;
+       };
+
+       du: display@feb00000 {
+               compatible = "renesas,du-r8a7791";
+               reg = <0 0xfeb00000 0 0x40000>,
+                     <0 0xfeb90000 0 0x1c>;
+               reg-names = "du", "lvds.0";
+               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_DU0>,
+                        <&mstp7_clks R8A7791_CLK_DU1>,
+                        <&mstp7_clks R8A7791_CLK_LVDS0>;
+               clock-names = "du.0", "du.1", "lvds.0";
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               du_out_rgb: endpoint {
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               du_out_lvds0: endpoint {
+                               };
+                       };
+               };
+       };
+
        clocks {
                #address-cells = <2>;
                #size-cells = <2>;
diff --git a/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi b/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi
new file mode 100644 (file)
index 0000000..65cb50f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Common file for the AA104XD12 panel connected to Renesas R-Car boards
+ *
+ * Copyright (C) 2014 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/ {
+       panel {
+               compatible = "mitsubishi,aa104xd12", "panel-dpi";
+
+               width-mm = <210>;
+               height-mm = <158>;
+
+               panel-timing {
+                       /* 1024x768 @60Hz (65MHz pixel clock) */
+                       clock-frequency = <65000000>;
+                       hactive = <1024>;
+                       vactive = <768>;
+                       hsync-len = <136>;
+                       hfront-porch = <20>;
+                       hback-porch = <160>;
+                       vfront-porch = <3>;
+                       vback-porch = <29>;
+                       vsync-len = <6>;
+               };
+
+               port {
+                       panel_in: endpoint {
+                               remote-endpoint = <&lvds_connector>;
+                       };
+               };
+       };
+};
+
+&lvds_connector {
+       remote-endpoint = <&panel_in>;
+};
index 7997dc9863ed2cb7dd1cfc49c6f5bfcc70304d45..883878b32971a915d6d841ecc404cba1d9dd6056 100644 (file)
@@ -12,5 +12,5 @@
 #include "sama5d3_uart.dtsi"
 
 / {
-       compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
 };
index 39f832253cafb7613ed46bed3d2cb3d19f0a1b1f..4b4434aca351208553a857d0b9a662c4a977b2e3 100644 (file)
@@ -10,5 +10,5 @@
 #include "sama5d3_gmac.dtsi"
 
 / {
-       compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5";
 };
index 89cda2c0da39268f548533538fb76a9d08652a6e..aa01573fdee9366dbb23086092e8dde3d7970d45 100644 (file)
@@ -12,5 +12,5 @@
 #include "sama5d3_mci2.dtsi"
 
 / {
-       compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5";
 };
index d20cd71b5f0e7f142c9b83ce81b5f9d5a7e73e6a..16c39f4c96a40841d07bac8ffce1d768d1c69cc4 100644 (file)
@@ -14,5 +14,5 @@
 #include "sama5d3_tcb1.dtsi"
 
 / {
-       compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5";
 };
index db58cad6acd32633b88bf88a0589885bfa58ed9b..e85139ef40aff61a1da90f76d963ed015e9d3fc9 100644 (file)
@@ -16,5 +16,5 @@
 #include "sama5d3_uart.dtsi"
 
 / {
-       compatible = "atmel,samad36", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d36", "atmel,sama5d3", "atmel,sama5";
 };
index 962dc28dc37b9bbc4a3427c4e127d26fc689bf63..cfcd200b0c177967f91d9c26e4372d4f1f514b27 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 / {
-       compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5";
+       compatible = "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
 
        chosen {
                bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs";
index 543f895d18d3c870446f52085095221566ef83a3..2e652e2339e9a1caf9c4abead9d7b958cc7515a1 100644 (file)
                        clocks = <&ahb1_gates 6>;
                        resets = <&ahb1_rst 6>;
                        #dma-cells = <1>;
+
+                       /* DMA controller requires AHB1 clocked from PLL6 */
+                       assigned-clocks = <&ahb1_mux>;
+                       assigned-clock-parents = <&pll6>;
                };
 
                mmc0: mmc@01c0f000 {
index 5c21d216515a3d5a06b76a4b449cc72ef208b6af..8b7aa0dcdc6ee4d4a04100650c1ac33cebcd6762 100644 (file)
@@ -15,6 +15,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65913@58";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index c7c6825f11fbb1b902b6f230eb62d8d8f88077a6..38acf78d7815fab2ab14842503a4bf79c040f9ed 100644 (file)
                linux,initrd-end = <0x82800000>;
        };
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        firmware {
                trusted-foundations {
                        compatible = "tlm,trusted-foundations";
                                                regulator-name = "vddio-sdmmc3";
                                                regulator-min-microvolt = <1800000>;
                                                regulator-max-microvolt = <3300000>;
-                                               regulator-always-on;
-                                               regulator-boot-on;
                                        };
 
                                        ldousb {
        sdhci@78000400 {
                status = "okay";
                bus-width = <4>;
-               vmmc-supply = <&vddio_sdmmc3>;
+               vqmmc-supply = <&vddio_sdmmc3>;
                cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
                power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
        };
        sdhci@78000600 {
                status = "okay";
                bus-width = <8>;
-               vmmc-supply = <&vdd_1v8>;
                non-removable;
        };
 
index 96366214563542479db657f80b12a28918e30824..f91c2c9b2f9431aef1e1611ba2eaf064a40aed7d 100644 (file)
                linux,initrd-end = <0x82800000>;
        };
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        firmware {
                trusted-foundations {
                        compatible = "tlm,trusted-foundations";
        sdhci@78000600 {
                status = "okay";
                bus-width = <8>;
-               vmmc-supply = <&vdd_1v8>;
                non-removable;
        };
 
index 2ca9c1807f72374bb176aada1203f6d9bc88e220..222f3b3f4dd5c4f852259349228db76e89c7ac27 100644 (file)
@@ -9,13 +9,6 @@
        compatible = "nvidia,tegra114";
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-       };
-
        host1x@50000000 {
                compatible = "nvidia,tegra114-host1x", "simple-bus";
                reg = <0x50000000 0x00028000>;
index 029c9a0215413355d3ce7c081a3ac5bd00fcd3d7..51b373ff106555edf302c9066809ea4abbd25c5c 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 7d0784ce4c748183083fa4ab5947b9adfb083d72..53181d31024713796897f5980cf9994339eb691f 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 13008858e96754dda8de3f838add6e0b6d4fb911..5c3f7813360d2a59bffcc463f059b2047440fabf 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 478c555ebd96bd0897bae7d3d0e7fa368573cba5..df2b06b299851a85533243a96126e49e43813066 100644 (file)
         * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
-       serial@0,70006000 {
+       uarta: serial@0,70006000 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006000 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006040 {
+       uartb: serial@0,70006040 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006040 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006200 {
+       uartc: serial@0,70006200 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006200 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006300 {
+       uartd: serial@0,70006300 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006300 0x0 0x40>;
                reg-shift = <2>;
index a37279af687c6a436ba5308c6139a5c3c8be9014..b926a07b944303fb24468d6899bc9324c7c956bb 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 8cfb83f42e1fd87ff608b171a2c404942975b0ff..1dd7d7bfdfcc25e3d3fcb35e6f17c4ccb95b2b5f 100644 (file)
@@ -6,6 +6,11 @@
        model = "Toradex Colibri T20 512MB on Iris";
        compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20";
 
+       aliases {
+               serial0 = &uarta;
+               serial1 = &uartd;
+       };
+
        host1x@50000000 {
                hdmi@54280000 {
                        status = "okay";
index 1b7c56b33acae6f2c6c4b1da3154d6c92aebb96a..9b87526ab0b70fad25125a4f5764d418243aa50c 100644 (file)
@@ -6,6 +6,10 @@
        model = "Avionic Design Medcom-Wide board";
        compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20";
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        pwm@7000a000 {
                status = "okay";
        };
index d4438e30de456c70047457f6ee974ac31a6f686f..ed7e1009326cd748628c5422849bfffad2a3b21e 100644 (file)
@@ -10,6 +10,8 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartc;
        };
 
        memory {
index a1d4bf9895d74c8b0efb7d2fe948a9fcd602b25e..ea282c7c0ca5645394a28e313fbad1deac339882 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 80e7d386ce3452e3776e70771233eea336a81e98..13d4e6185275f43c3f74fbeb0397d97b0348f9fb 100644 (file)
@@ -7,6 +7,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 5ad87979ab13ff68527f3b6c8a1ba9bb51e2bda5..d99af4ef9c6444f73e7044c7447557fbca4b4ee1 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000c500/rtc@56";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index ca8484cccddccc32313649e1abfd644b527f50a3..04c58e9ca490bb8bf205b4608d371bbecf5f3f61 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 1843725785c90f1f2518bade455af7dead4c6ec8..340d81108df1a232fcefe64c81bdb9372a4c814c 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/max8907@3c";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 3b374c49d04d962478aebc62799bcce5b31aa6dd..8acf5d85c99da5b0077f6ce1b80c2ba08d45865c 100644 (file)
@@ -9,14 +9,6 @@
        compatible = "nvidia,tegra20";
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-               serial4 = &uarte;
-       };
-
        host1x@50000000 {
                compatible = "nvidia,tegra20-host1x", "simple-bus";
                reg = <0x50000000 0x00024000>;
index 45d40f024585d95a53928e36d351db05aa514592..6236bdecb48ba08891896f6967199aba6e2a6680 100644 (file)
                rtc0 = "/i2c@7000c000/rtc@68";
                rtc1 = "/i2c@7000d000/tps65911@2d";
                rtc2 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartb;
+               serial2 = &uartc;
+               serial3 = &uartd;
        };
 
        pcie-controller@00003000 {
index cee8f2246fdb2467fbde27bae612a0752d7f6da2..6b157eeabcc5c9b07009c4d91a291122b3732887 100644 (file)
@@ -9,6 +9,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65911@2d";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 20637954624425795453cc78699e674276de5430..a1b682ea01bd70ab94025cd12a4d5205d45f9db7 100644 (file)
@@ -30,6 +30,8 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65911@2d";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartc;
        };
 
        memory {
index 7793abd5bef132388100e8a0c1aa4ca79e7ed803..4d3ddc58564126433410c17b5c3ef569532a9aa1 100644 (file)
@@ -10,6 +10,9 @@
                rtc0 = "/i2c@7000c000/rtc@68";
                rtc1 = "/i2c@7000d000/tps65911@2d";
                rtc2 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartb;
+               serial2 = &uartd;
        };
 
        host1x@50000000 {
index aa6ccea13d308036e853b58bafe840d7c3450e87..b270b9e3d4554407157be95cadd71c9b5e902eeb 100644 (file)
@@ -9,14 +9,6 @@
        compatible = "nvidia,tegra30";
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-               serial4 = &uarte;
-       };
-
        pcie-controller@00003000 {
                compatible = "nvidia,tegra30-pcie";
                device_type = "pci";
index 3fd1b74e1216e1b028751ef64e4f84e7af65bcf3..de1b453c2932f7e4cb7ad3d6d4eb227c34357afd 100644 (file)
 
 };
 
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1>;
+       bus-width = <4>;
+       status = "okay";
+};
+
 &fec1 {
        phy-mode = "rmii";
        pinctrl-names = "default";
 
 &iomuxc {
        vf610-cosmic {
+               pinctrl_esdhc1: esdhc1grp {
+                       fsl,pins = <
+                               VF610_PAD_PTA24__ESDHC1_CLK     0x31ef
+                               VF610_PAD_PTA25__ESDHC1_CMD     0x31ef
+                               VF610_PAD_PTA26__ESDHC1_DAT0    0x31ef
+                               VF610_PAD_PTA27__ESDHC1_DAT1    0x31ef
+                               VF610_PAD_PTA28__ESDHC1_DAT2    0x31ef
+                               VF610_PAD_PTA29__ESDHC1_DAT3    0x31ef
+                               VF610_PAD_PTB28__GPIO_98        0x219d
+                       >;
+               };
+
                pinctrl_fec1: fec1grp {
                        fsl,pins = <
                                VF610_PAD_PTC9__ENET_RMII1_MDC          0x30d2
index e1f51ca127fe835664f0609b19a1ef428de62ca8..0429bbd89fba78ab2da1fbc831b7fa0118acbf4d 100644 (file)
        };
 };
 
+&clkc {
+       fclk-enable = <0xf>;
+};
+
 &gem0 {
        status = "okay";
        phy-mode = "rgmii-id";
index d86771abbf57a3760e65c4718a4fbfc6bad8359b..72041f002b7ea46447a3b060e4e3c96b6b6f8c85 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/edma.h>
+#include <linux/dma-mapping.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
@@ -1623,6 +1624,11 @@ static int edma_probe(struct platform_device *pdev)
        struct device_node      *node = pdev->dev.of_node;
        struct device           *dev = &pdev->dev;
        int                     ret;
+       struct platform_device_info edma_dev_info = {
+               .name = "edma-dma-engine",
+               .dma_mask = DMA_BIT_MASK(32),
+               .parent = &pdev->dev,
+       };
 
        if (node) {
                /* Check if this is a second instance registered */
@@ -1793,6 +1799,9 @@ static int edma_probe(struct platform_device *pdev)
                        edma_write_array(j, EDMA_QRAE, i, 0x0);
                }
                arch_num_cc++;
+
+               edma_dev_info.id = j;
+               platform_device_register_full(&edma_dev_info);
        }
 
        return 0;
index 72058b8a6f4d4ccce4a8e5a740f82ba0321ef561..e21ef830a48365a06db80d0127fa5a3f55f17f71 100644 (file)
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_MAX77802=y
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
 CONFIG_PL330_DMA=y
 CONFIG_COMMON_CLK_MAX77686=y
+CONFIG_COMMON_CLK_MAX77802=y
 CONFIG_COMMON_CLK_S2MPS11=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_IIO=y
index e688741c89aae2d24ec42b6e95120bf78e32b317..e6b0007355f883eaca70e4ce4df5b6fcc9818ae4 100644 (file)
@@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
+CONFIG_SPI=y
 CONFIG_SPI_IMX=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_GPIO_SYSFS=y
index 8fca6e276b6949e73e036aac03a1d84f9a2b2760..6790f1b3f3a1d988da100554384a125cb63a4345 100644 (file)
@@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_ALGOPCF=m
 CONFIG_I2C_ALGOPCA=m
 CONFIG_I2C_IMX=y
+CONFIG_SPI=y
 CONFIG_SPI_IMX=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_MC9S08DZ60=y
index f1dc7fc668f33321416f8ef6c14f335f91914481..9d7a32f93fcf2e93a66b42b3352b7b3119d9ec0e 100644 (file)
@@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_S3C2410=y
 CONFIG_I2C_SIRF=y
 CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
@@ -235,6 +236,7 @@ CONFIG_SPI_TEGRA20_SLINK=y
 CONFIG_SPI_XILINX=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_PALMAS=y
+CONFIG_PINCTRL_APQ8084=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DWAPB=y
@@ -411,6 +413,7 @@ CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
 CONFIG_QCOM_GSBI=y
 CONFIG_COMMON_CLK_QCOM=y
+CONFIG_APQ_MMCC_8084=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
index 16e719c268dd77407f8ffb00fc687036d319a2b1..b3f86670d2eb752d0186e7cd89b34b1fa414cf77 100644 (file)
@@ -86,7 +86,6 @@ CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
 CONFIG_NETFILTER=y
 CONFIG_CAN=m
 CONFIG_CAN_C_CAN=m
@@ -112,6 +111,7 @@ CONFIG_MTD_OOPS=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_OMAP2=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
@@ -317,7 +317,7 @@ CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS4_FS=m
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index d7a5855a5db89a550f967125f5e300c8a1152f54..a2956c3112f14abd66c5ed586d621d8e7dd548b8 100644 (file)
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -11,23 +12,17 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_HOTPLUG=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_SOCFPGA=y
-CONFIG_MACH_SOCFPGA_CYCLONE5=y
 CONFIG_ARM_THUMBEE=y
-# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
-# CONFIG_CACHE_L2X0 is not set
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_AEABI=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE=""
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_NET=y
@@ -41,38 +36,30 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
+CONFIG_IPV6=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_GW=y
-CONFIG_CAN_DEV=y
-CONFIG_CAN_CALC_BITTIMING=y
 CONFIG_CAN_C_CAN=y
 CONFIG_CAN_C_CAN_PLATFORM=y
 CONFIG_CAN_DEBUG_DEVICES=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SRAM=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_STMMAC_ETH=y
+CONFIG_DWMAC_SOCFPGA=y
 CONFIG_MICREL_PHY=y
-# CONFIG_STMMAC_PHY_ID_ZERO_WORKAROUND is not set
 CONFIG_INPUT_EVDEV=y
-CONFIG_DWMAC_SOCFPGA=y
-CONFIG_PPS=y
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_GARP=y
-CONFIG_IPV6=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_AMBAKMI=y
 CONFIG_LEGACY_PTY_COUNT=16
@@ -81,45 +68,43 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
 CONFIG_SERIAL_8250_DW=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
-# CONFIG_RTC_HCTOSYS is not set
+CONFIG_PMBUS=y
+CONFIG_SENSORS_LTC2978=y
+CONFIG_SENSORS_LTC2978_REGULATOR=y
 CONFIG_WATCHDOG=y
 CONFIG_DW_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_USB=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_MMC=y
+CONFIG_MMC_DW=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT3_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY_USER is not set
-CONFIG_FHANDLE=y
+CONFIG_EXT4_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_NTFS_RW=y
 CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
 CONFIG_ENABLE_DEFAULT_TRACERS=y
 CONFIG_DEBUG_USER=y
 CONFIG_XZ_DEC=y
-CONFIG_I2C=y
-CONFIG_I2C_DESIGNWARE_CORE=y
-CONFIG_I2C_DESIGNWARE_PLATFORM=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_MMC=y
-CONFIG_MMC_DW=y
-CONFIG_PM=y
-CONFIG_SUSPEND=y
-CONFIG_MMC_UNSAFE_RESUME=y
-CONFIG_USB=y
-CONFIG_USB_DWC2=y
-CONFIG_USB_DWC2_HOST=y
-CONFIG_USB_DWC2_PLATFORM=y
index fc44d3761f9e7d36eb8ff4911ff0120a63e7584f..ce73ab6354149f8c490319bdeb6acdbc92cd784c 100644 (file)
@@ -44,16 +44,6 @@ struct cpu_context_save {
        __u32   extra[2];               /* Xscale 'acc' register, etc */
 };
 
-struct arm_restart_block {
-       union {
-               /* For user cache flushing */
-               struct {
-                       unsigned long start;
-                       unsigned long end;
-               } cache;
-       };
-};
-
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
 #endif
        struct restart_block    restart_block;
-       struct arm_restart_block        arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)                                          \
index 0c8b10801d36ad6a25806892ea283c7b93d28f61..9f5d81881eb6da9bdd599a7321cf83fbdcb0a0e2 100644 (file)
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
-static long do_cache_op_restart(struct restart_block *);
-
 static inline int
 __do_cache_op(unsigned long start, unsigned long end)
 {
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);
 
-               if (signal_pending(current)) {
-                       struct thread_info *ti = current_thread_info();
-
-                       ti->restart_block = (struct restart_block) {
-                               .fn     = do_cache_op_restart,
-                       };
-
-                       ti->arm_restart_block = (struct arm_restart_block) {
-                               {
-                                       .cache = {
-                                               .start  = start,
-                                               .end    = end,
-                                       },
-                               },
-                       };
-
-                       return -ERESTART_RESTARTBLOCK;
-               }
+               if (fatal_signal_pending(current))
+                       return 0;
 
                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
        return 0;
 }
 
-static long do_cache_op_restart(struct restart_block *unused)
-{
-       struct arm_restart_block *restart_block;
-
-       restart_block = &current_thread_info()->arm_restart_block;
-       return __do_cache_op(restart_block->cache.start,
-                            restart_block->cache.end);
-}
-
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
index 57a403a5c22bf9e174ec88a0377b4ab07c3b0a29..8664ff17cbbeaf531b03174e1524cc00a6e86849 100644 (file)
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        pgd = pgdp + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
-               unmap_puds(kvm, pgd, addr, next);
+               if (!pgd_none(*pgd))
+                       unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (is_error_pfn(pfn))
                return -EFAULT;
 
-       if (kvm_is_mmio_pfn(pfn))
+       if (kvm_is_device_pfn(pfn))
                mem_type = PAGE_S2_DEVICE;
 
        spin_lock(&kvm->mmu_lock);
index a1781847505075c48b3b91fb206bc74441c88cc6..409637254594af1392e89cd3436fb330352278b5 100644 (file)
 #define PFD_PLL1_BASE          (anatop_base + 0x2b0)
 #define PFD_PLL2_BASE          (anatop_base + 0x100)
 #define PFD_PLL3_BASE          (anatop_base + 0xf0)
+#define PLL1_CTRL              (anatop_base + 0x270)
+#define PLL2_CTRL              (anatop_base + 0x30)
 #define PLL3_CTRL              (anatop_base + 0x10)
+#define PLL4_CTRL              (anatop_base + 0x70)
+#define PLL5_CTRL              (anatop_base + 0xe0)
+#define PLL6_CTRL              (anatop_base + 0xa0)
 #define PLL7_CTRL              (anatop_base + 0x20)
+#define ANA_MISC1              (anatop_base + 0x160)
 
 static void __iomem *anatop_base;
 static void __iomem *ccm_base;
@@ -67,25 +73,34 @@ static void __iomem *ccm_base;
 /* sources for multiplexer clocks, this is used multiple times */
 static const char *fast_sels[] = { "firc", "fxosc", };
 static const char *slow_sels[] = { "sirc_32k", "sxosc", };
-static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
-static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
-static const char *sys_sels[]  = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", };
+static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
+static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
+static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *sys_sels[]  = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", };
 static const char *ddr_sels[]  = { "pll2_pfd2", "sys_sel", };
 static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", };
 static const char *enet_ts_sels[]      = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", };
-static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
-static const char *sai_sels[]  = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
+static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
+static const char *sai_sels[]  = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
 static const char *nfc_sels[]  = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", };
-static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
-static const char *esdhc_sels[]        = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
-static const char *dcu_sels[]  = { "pll1_pfd2", "pll3_main", };
+static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
+static const char *esdhc_sels[]        = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
+static const char *dcu_sels[]  = { "pll1_pfd2", "pll3_usb_otg", };
 static const char *gpu_sels[]  = { "pll2_pfd2", "pll3_pfd2", };
-static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", };
+static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", };
 /* FTM counter clock source, not module clock */
 static const char *ftm_ext_sels[]      = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", };
 static const char *ftm_fix_sels[]      = { "sxosc", "ipg_bus", };
 
-static struct clk_div_table pll4_main_div_table[] = {
+
+static struct clk_div_table pll4_audio_div_table[] = {
        { .val = 0, .div = 1 },
        { .val = 1, .div = 2 },
        { .val = 2, .div = 6 },
@@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
        clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0);
        clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0);
 
+       /* Clock source from external clock via LVDs PAD */
+       clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
+
        clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2);
 
        np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
@@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
        clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels));
        clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels));
 
-       clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1);
-       clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0);
-       clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1);
-       clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2);
-       clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3);
-
-       clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1);
-       clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0);
-       clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1);
-       clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2);
-       clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3);
-
-       clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1);
-       clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0);
-       clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1);
-       clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2);
-       clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3);
-
-       clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1);
-       /* Enet pll: fixed 50Mhz */
-       clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6);
-       /* pll6: default 960Mhz */
-       clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1);
-       /* pll7: USB1 PLL at 480MHz */
-       clk[VF610_CLK_PLL7_MAIN] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2);
+       clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+       clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+       clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1);
+       clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1);
+       clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB,     "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1);
+       clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV,      "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f);
+       clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET,    "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3);
+       clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV,      "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f);
+       clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB,     "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1);
+
+       clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+       clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+       /* Do not bypass PLLs initially */
+       clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]);
+       clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]);
+       clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]);
+       clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]);
+       clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]);
+       clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]);
+       clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]);
+
+       clk[VF610_CLK_PLL1_SYS]      = imx_clk_gate("pll1_sys",      "pll1_bypass", PLL1_CTRL, 13);
+       clk[VF610_CLK_PLL2_BUS]      = imx_clk_gate("pll2_bus",      "pll2_bypass", PLL2_CTRL, 13);
+       clk[VF610_CLK_PLL3_USB_OTG]  = imx_clk_gate("pll3_usb_otg",  "pll3_bypass", PLL3_CTRL, 13);
+       clk[VF610_CLK_PLL4_AUDIO]    = imx_clk_gate("pll4_audio",    "pll4_bypass", PLL4_CTRL, 13);
+       clk[VF610_CLK_PLL5_ENET]     = imx_clk_gate("pll5_enet",     "pll5_bypass", PLL5_CTRL, 13);
+       clk[VF610_CLK_PLL6_VIDEO]    = imx_clk_gate("pll6_video",    "pll6_bypass", PLL6_CTRL, 13);
+       clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13);
+
+       clk[VF610_CLK_LVDS1_IN]  = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10));
+
+       clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0);
+       clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1);
+       clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2);
+       clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3);
+
+       clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0);
+       clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1);
+       clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2);
+       clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3);
+
+       clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0);
+       clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1);
+       clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2);
+       clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3);
 
        clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5);
        clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5);
@@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
        clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3);
        clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2);
 
-       clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1);
-       clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock);
-       clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1);
+       clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1);
+       clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock);
+       clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1);
 
-       clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6);
-       clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6);
+       clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6);
+       clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6);
 
        clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4));
        clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4));
@@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
        clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1);
        clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4));
 
-       clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10);
-       clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 20);
+       clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10);
+       clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_enet", 1, 20);
        clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4);
        clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
        clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
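
A note on the vf610 rework above: each PLL, previously a single fixed-factor placeholder, is now a chain of bypass-source mux -> pllv3 PLL -> bypass mux (CLK_SET_RATE_PARENT, so rate requests reach the PLL) -> output gate -> PFDs, and the renamed gate outputs (pll1_sys, pll2_bus, pll3_usb_otg, pll4_audio, ...) ripple through the parent-name arrays at the top of the hunk. The non-power-of-two pll4_audio divider comes from clk_register_divider_table(); the standalone C model below illustrates that table lookup using only the rows visible in the diff -- the names and rates in it are assumptions, not the driver's API.

#include <stdio.h>

/* Illustrative model of the pll4_audio_div lookup; the entries mirror
 * the visible rows of pll4_audio_div_table above (the hunk context
 * cuts the table off, so any further rows are not modelled). */
struct div_entry { unsigned int val; unsigned int div; };

static const struct div_entry audio_div_table[] = {
	{ 0, 1 },
	{ 1, 2 },
	{ 2, 6 },
};

/* Resolve the output rate for a register field value the way
 * clk_register_divider_table() would: parent_rate / table[val].div. */
static unsigned long audio_div_rate(unsigned long parent_rate, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < sizeof(audio_div_table) / sizeof(audio_div_table[0]); i++)
		if (audio_div_table[i].val == val)
			return parent_rate / audio_div_table[i].div;
	return 0;	/* field value not described by the table */
}

int main(void)
{
	/* hypothetical 1179.648 MHz pll4_audio, register field 2 -> /6 */
	printf("%lu Hz\n", audio_div_rate(1179648000UL, 2));
	return 0;
}
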
index 559c69a477317b2b0e718ae64af18ebf5a67b5e5..7d11979da030a7dccd70e54235ba4b2baf65f887 100644 (file)
@@ -76,7 +76,7 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
        u32 n, byte_enables, data;
 
        if (!is_pci_memory(addr)) {
-               __raw_writeb(value, addr);
+               __raw_writeb(value, p);
                return;
        }
 
@@ -141,7 +141,7 @@ static inline unsigned char __indirect_readb(const volatile void __iomem *p)
        u32 n, byte_enables, data;
 
        if (!is_pci_memory(addr))
-               return __raw_readb(addr);
+               return __raw_readb(p);
 
        n = addr % 4;
        byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
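
In the two ixp4xx hunks above, p is the void __iomem * argument and addr is, per the surrounding code, its integer form used for the is_pci_memory() range check and the byte-lane math. Handing the integer back to __raw_writeb()/__raw_readb() is a type confusion that at minimum trips sparse, and breaks the build where those accessors are typed inlines -- the likely motivation, judging from the diff alone. A standalone, illustrative model (the names and the range check are made up) of why keeping the two forms distinct catches the bug at compile time:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model: an MMIO-style write must take the pointer, not
 * the integer address derived from it. */
static void raw_writeb(uint8_t value, volatile void *p)
{
	*(volatile uint8_t *)p = value;
}

static int is_pci_memory(uintptr_t addr)
{
	(void)addr;
	return 0;	/* model: nothing is PCI memory here */
}

static void indirect_writeb(uint8_t value, volatile void *p)
{
	uintptr_t addr = (uintptr_t)p;	/* numeric form, for range checks */

	if (!is_pci_memory(addr)) {
		raw_writeb(value, p);	/* correct: hand back the pointer */
		/* raw_writeb(value, addr) would draw an int-to-pointer
		 * diagnostic here -- the protection the fix restores */
		return;
	}
	/* ... indirect PCI access path elided ... */
}

int main(void)
{
	uint8_t byte = 0;
	indirect_writeb(0x5a, &byte);
	printf("0x%02x\n", byte);
	return 0;
}
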
index 6478626e3ff64c035e11ac69a290099c44d972a8..d0d39f150fabf845d33a5ecff9c1d2687d387bb3 100644 (file)
@@ -188,7 +188,7 @@ static void __init thermal_quirk(void)
 
 static void __init mvebu_dt_init(void)
 {
-       if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
+       if (of_machine_is_compatible("marvell,armadaxp"))
                i2c_quirk();
        if (of_machine_is_compatible("marvell,a375-db")) {
                external_abort_quirk();
index 2bdc3233abe2bcc78c527bf8efe4b0032a5880dc..044b51185fccb2e68c1f89c4efb3822704d28488 100644 (file)
@@ -400,6 +400,8 @@ int __init coherency_init(void)
                 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
                armada_375_380_coherency_init(np);
 
+       of_node_put(np);
+
        return 0;
 }
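
The one-line addition above follows the standard device-tree refcounting rule: of_find_compatible_node() (like the other of_find_* helpers) returns the node with its reference count raised, and the caller must balance that with of_node_put() once it is done with the node. A minimal kernel-style sketch of the pattern -- the compatible string is made up:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>

static int __init example_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-block");
	if (!np)
		return -ENODEV;

	/* ... read properties, map registers, etc. ... */

	of_node_put(np);	/* balance the lookup's implicit get */
	return 0;
}
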
 
index d22c30d3ccfa0809d2662cbd5390c20f40b24a55..8c58b71c2727cbf544582f9b3f6cd4fc00984035 100644 (file)
@@ -917,6 +917,10 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 static int __init omap_device_late_init(void)
 {
        bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
+
+       WARN(!of_have_populated_dt(),
+               "legacy booting deprecated, please update to boot with .dts\n");
+
        return 0;
 }
 omap_late_initcall_sync(omap_device_late_init);
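
For context on the hunk above: WARN(cond, fmt, ...) logs the message with a stack trace when cond is true and also evaluates to cond, so the same macro can gate a fallback path. A hedged sketch -- the function name and the skipped step are illustrative, only the WARN() idiom itself is from the diff:

#include <linux/bug.h>
#include <linux/of.h>

static void example_check_boot_mode(void)
{
	/* Nag loudly on legacy (non-DT) boot; WARN returns the condition,
	 * so DT-only setup can be skipped in the same breath. */
	if (WARN(!of_have_populated_dt(),
		 "legacy booting deprecated, please update to boot with .dts\n"))
		return;

	/* ... DT-only setup would continue here ... */
}
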
index bbf9df37ad4b6cf540166d23c1a3e0690b6b7176..d28fe291233a55ddb0173bcd71e124fc900010e2 100644 (file)
 #define DMEMC_VIRT             IOMEM(0xf6100000)
 #define DMEMC_SIZE             0x00100000
 
+/*
+ * Reserved space for low level debug virtual addresses within
+ * 0xf6200000..0xf6201000
+ */
+
 /*
  * Internal Memory Controller (PXA27x and later)
  */
index a6503d8c77de07c76288de39d6c4ede94db91090..004ed92ee598df3c9369e8bfbf227e48173135e0 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/gpio.h>
index b222f68d55b7251ef2d8da0e2fcd50005fdfdef4..66f67816a844623977a4595ef23642ed381b3549 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/gpio.h>
index e709835344038a5a1fbace09bcf68d554c27aa8b..0e912aff53ded12a956d8e190f5e0fb130d767ab 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/clk.h>
index 79c47847f2004d5921d3061ff7a73f120a1b4ce3..d649ade4a202a2794abb107fd5b252047b5958c9 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/of_platform.h>
index 1cf2c75dacfb49b94c5a26c03d6edb15c064baf1..f27b5a833bf0bb966a7046ae18a64f3f306f3134 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/mfd/tmio.h>
index 46aa540133d6b81cb05053ffaab7a1a28aa21553..451ba624ce6e343d023ddfb5595624484cfeb781 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-#include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/of_platform.h>
-#include <linux/platform_data/rcar-du.h>
 
 #include <asm/mach/arch.h>
 
-#include "clock.h"
 #include "common.h"
-#include "irqs.h"
 #include "r8a7791.h"
 #include "rcar-gen2.h"
 
-/* DU */
-static struct rcar_du_encoder_data koelsch_du_encoders[] = {
-       {
-               .type = RCAR_DU_ENCODER_NONE,
-               .output = RCAR_DU_OUTPUT_LVDS0,
-               .connector.lvds.panel = {
-                       .width_mm = 210,
-                       .height_mm = 158,
-                       .mode = {
-                               .pixelclock = 65000000,
-                               .hactive = 1024,
-                               .hfront_porch = 20,
-                               .hback_porch = 160,
-                               .hsync_len = 136,
-                               .vactive = 768,
-                               .vfront_porch = 3,
-                               .vback_porch = 29,
-                               .vsync_len = 6,
-                       },
-               },
-       },
-};
-
-static struct rcar_du_platform_data koelsch_du_pdata = {
-       .encoders = koelsch_du_encoders,
-       .num_encoders = ARRAY_SIZE(koelsch_du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
-       DEFINE_RES_MEM(0xfeb00000, 0x40000),
-       DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
-       DEFINE_RES_IRQ(gic_spi(256)),
-       DEFINE_RES_IRQ(gic_spi(268)),
-};
-
-static void __init koelsch_add_du_device(void)
-{
-       struct platform_device_info info = {
-               .name = "rcar-du-r8a7791",
-               .id = -1,
-               .res = du_resources,
-               .num_res = ARRAY_SIZE(du_resources),
-               .data = &koelsch_du_pdata,
-               .size_data = sizeof(koelsch_du_pdata),
-               .dma_mask = DMA_BIT_MASK(32),
-       };
-
-       platform_device_register_full(&info);
-}
-
-/*
- * This is a really crude hack to provide clkdev support to platform
- * devices until they get moved to DT.
- */
-static const struct clk_name clk_names[] __initconst = {
-       { "du0", "du.0", "rcar-du-r8a7791" },
-       { "du1", "du.1", "rcar-du-r8a7791" },
-       { "lvds0", "lvds.0", "rcar-du-r8a7791" },
-};
-
-static void __init koelsch_add_standard_devices(void)
-{
-       shmobile_clk_workaround(clk_names, ARRAY_SIZE(clk_names), false);
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
-       koelsch_add_du_device();
-}
-
 static const char * const koelsch_boards_compat_dt[] __initconst = {
        "renesas,koelsch",
        "renesas,koelsch-reference",
@@ -110,7 +34,6 @@ DT_MACHINE_START(KOELSCH_DT, "koelsch")
        .smp            = smp_ops(r8a7791_smp_ops),
        .init_early     = shmobile_init_delay,
        .init_time      = rcar_gen2_timer_init,
-       .init_machine   = koelsch_add_standard_devices,
        .init_late      = shmobile_init_late,
        .reserve        = rcar_gen2_reserve,
        .dt_compat      = koelsch_boards_compat_dt,
index 7111b5c1d67b764f79daec420390fea7f408c92c..3a6a2766dc2b8699a5a6d47f89dbedda4cd2c996 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/dma-mapping.h>
index d9cdf9a97e2390b4f63cb4b22b11246b2088bea6..f2ef759b6e969cc811f283c7700ef18de0f057ac 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/delay.h>
index 77e36fa0b14216bb644149b2969267961ae43367..7c9b63bdde9fa458c3ddc7aa751030e0484beaf2 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/delay.h>
index bc4b48357ddea891ccb0365ba267325adff374d8..fa06bdba61df19d33fe6990eaa0a867855434c98 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/of_platform.h>
-#include <linux/platform_data/rcar-du.h>
 
 #include <asm/mach/arch.h>
 
-#include "clock.h"
 #include "common.h"
-#include "irqs.h"
 #include "r8a7790.h"
 #include "rcar-gen2.h"
 
-/* DU */
-static struct rcar_du_encoder_data lager_du_encoders[] = {
-       {
-               .type = RCAR_DU_ENCODER_VGA,
-               .output = RCAR_DU_OUTPUT_DPAD0,
-       }, {
-               .type = RCAR_DU_ENCODER_NONE,
-               .output = RCAR_DU_OUTPUT_LVDS1,
-               .connector.lvds.panel = {
-                       .width_mm = 210,
-                       .height_mm = 158,
-                       .mode = {
-                               .pixelclock = 65000000,
-                               .hactive = 1024,
-                               .hfront_porch = 20,
-                               .hback_porch = 160,
-                               .hsync_len = 136,
-                               .vactive = 768,
-                               .vfront_porch = 3,
-                               .vback_porch = 29,
-                               .vsync_len = 6,
-                       },
-               },
-       },
-};
-
-static struct rcar_du_platform_data lager_du_pdata = {
-       .encoders = lager_du_encoders,
-       .num_encoders = ARRAY_SIZE(lager_du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
-       DEFINE_RES_MEM(0xfeb00000, 0x70000),
-       DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
-       DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
-       DEFINE_RES_IRQ(gic_spi(256)),
-       DEFINE_RES_IRQ(gic_spi(268)),
-       DEFINE_RES_IRQ(gic_spi(269)),
-};
-
-static void __init lager_add_du_device(void)
-{
-       struct platform_device_info info = {
-               .name = "rcar-du-r8a7790",
-               .id = -1,
-               .res = du_resources,
-               .num_res = ARRAY_SIZE(du_resources),
-               .data = &lager_du_pdata,
-               .size_data = sizeof(lager_du_pdata),
-               .dma_mask = DMA_BIT_MASK(32),
-       };
-
-       platform_device_register_full(&info);
-}
-
-/*
- * This is a really crude hack to provide clkdev support to platform
- * devices until they get moved to DT.
- */
-static const struct clk_name clk_names[] __initconst = {
-       { "du0", "du.0", "rcar-du-r8a7790" },
-       { "du1", "du.1", "rcar-du-r8a7790" },
-       { "du2", "du.2", "rcar-du-r8a7790" },
-       { "lvds0", "lvds.0", "rcar-du-r8a7790" },
-       { "lvds1", "lvds.1", "rcar-du-r8a7790" },
-};
-
-static void __init lager_add_standard_devices(void)
-{
-       shmobile_clk_workaround(clk_names, ARRAY_SIZE(clk_names), false);
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
-       lager_add_du_device();
-}
-
 static const char *lager_boards_compat_dt[] __initdata = {
        "renesas,lager",
        "renesas,lager-reference",
@@ -116,7 +33,6 @@ DT_MACHINE_START(LAGER_DT, "lager")
        .smp            = smp_ops(r8a7790_smp_ops),
        .init_early     = shmobile_init_delay,
        .init_time      = rcar_gen2_timer_init,
-       .init_machine   = lager_add_standard_devices,
        .init_late      = shmobile_init_late,
        .reserve        = rcar_gen2_reserve,
        .dt_compat      = lager_boards_compat_dt,
index 571327b1c942c138fbba5cfe8e5ea03d7896ccb1..f8197eb6e5669ada1b8ddd57e7bb09e42bedfe62 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/gpio.h>
@@ -36,7 +32,6 @@
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/camera-rcar.h>
 #include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
 #include <linux/platform_data/usb-rcar-gen2-phy.h>
 #include <linux/platform_device.h>
 #include <linux/phy.h>
  *
  */
 
-/* DU */
-static struct rcar_du_encoder_data lager_du_encoders[] = {
-       {
-               .type = RCAR_DU_ENCODER_VGA,
-               .output = RCAR_DU_OUTPUT_DPAD0,
-       }, {
-               .type = RCAR_DU_ENCODER_NONE,
-               .output = RCAR_DU_OUTPUT_LVDS1,
-               .connector.lvds.panel = {
-                       .width_mm = 210,
-                       .height_mm = 158,
-                       .mode = {
-                               .pixelclock = 65000000,
-                               .hactive = 1024,
-                               .hfront_porch = 20,
-                               .hback_porch = 160,
-                               .hsync_len = 136,
-                               .vactive = 768,
-                               .vfront_porch = 3,
-                               .vback_porch = 29,
-                               .vsync_len = 6,
-                       },
-               },
-       },
-};
-
-static const struct rcar_du_platform_data lager_du_pdata __initconst = {
-       .encoders = lager_du_encoders,
-       .num_encoders = ARRAY_SIZE(lager_du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
-       DEFINE_RES_MEM(0xfeb00000, 0x70000),
-       DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
-       DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
-       DEFINE_RES_IRQ(gic_spi(256)),
-       DEFINE_RES_IRQ(gic_spi(268)),
-       DEFINE_RES_IRQ(gic_spi(269)),
-};
-
-static void __init lager_add_du_device(void)
-{
-       struct platform_device_info info = {
-               .name = "rcar-du-r8a7790",
-               .id = -1,
-               .res = du_resources,
-               .num_res = ARRAY_SIZE(du_resources),
-               .data = &lager_du_pdata,
-               .size_data = sizeof(lager_du_pdata),
-               .dma_mask = DMA_BIT_MASK(32),
-       };
-
-       platform_device_register_full(&info);
-}
-
 /* LEDS */
 static struct gpio_led lager_leds[] = {
        {
@@ -804,8 +744,6 @@ static void __init lager_add_standard_devices(void)
 
        platform_device_register_full(&ether_info);
 
-       lager_add_du_device();
-
        platform_device_register_resndata(NULL, "qspi", 0,
                                          qspi_resources,
                                          ARRAY_SIZE(qspi_resources),
index ca5d34b92aa7fc8e70d55cf4e6c2cf31aa103e13..ed1087031c5def92376b7acc100ee9dd8234cff2 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/delay.h>
 #include <linux/kernel.h>
index 38d9cdd26587ebf3c843ea25d09d26674b9430c4..f0757bbaff872a428f6ac889cff47fd811d33ea8 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/clk/shmobile.h>
index ce33d7825c49a818a2bb1927299be684a7196544..598f704f76ae7420c96f420c9d3c9b63bbf09edc 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/kernel.h>
@@ -31,7 +27,6 @@
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/camera-rcar.h>
 #include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
 #include <linux/platform_data/usb-rcar-phy.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
@@ -175,62 +170,6 @@ static struct platform_device hspi_device = {
        .num_resources  = ARRAY_SIZE(hspi_resources),
 };
 
-/*
- * DU
- *
- * The panel only specifies the [hv]display and [hv]total values. The position
- * and width of the sync pulses don't matter, they're copied from VESA timings.
- */
-static struct rcar_du_encoder_data du_encoders[] = {
-       {
-               .type = RCAR_DU_ENCODER_VGA,
-               .output = RCAR_DU_OUTPUT_DPAD0,
-       }, {
-               .type = RCAR_DU_ENCODER_LVDS,
-               .output = RCAR_DU_OUTPUT_DPAD1,
-               .connector.lvds.panel = {
-                       .width_mm = 210,
-                       .height_mm = 158,
-                       .mode = {
-                               .pixelclock = 65000000,
-                               .hactive = 1024,
-                               .hfront_porch = 20,
-                               .hback_porch = 160,
-                               .hsync_len = 136,
-                               .vactive = 768,
-                               .vfront_porch = 3,
-                               .vback_porch = 29,
-                               .vsync_len = 6,
-                       },
-               },
-       },
-};
-
-static const struct rcar_du_platform_data du_pdata __initconst = {
-       .encoders = du_encoders,
-       .num_encoders = ARRAY_SIZE(du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
-       DEFINE_RES_MEM(0xfff80000, 0x40000),
-       DEFINE_RES_IRQ(gic_iid(0x3f)),
-};
-
-static void __init marzen_add_du_device(void)
-{
-       struct platform_device_info info = {
-               .name = "rcar-du-r8a7779",
-               .id = -1,
-               .res = du_resources,
-               .num_res = ARRAY_SIZE(du_resources),
-               .data = &du_pdata,
-               .size_data = sizeof(du_pdata),
-               .dma_mask = DMA_BIT_MASK(32),
-       };
-
-       platform_device_register_full(&info);
-}
-
 /* LEDS */
 static struct gpio_led marzen_leds[] = {
        {
@@ -389,7 +328,6 @@ static void __init marzen_init(void)
        platform_device_register_full(&vin1_info);
        platform_device_register_full(&vin3_info);
        platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
-       marzen_add_du_device();
 }
 
 static const char *marzen_boards_compat_dt[] __initdata = {
index c2330ea1802c6142dc4319836afe1490a7f4d5aa..1cf44dc6d718bcc0da183bc4e19aa595dfc7d08f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/init.h>
 #include <linux/io.h>
index 0794f0426e7044810ec46d695c4a9e0c87abd38b..9cac8247c72b6e4565e34523a19519815a19f6c1 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -455,7 +451,7 @@ enum {
        MSTP128, MSTP127, MSTP125,
        MSTP116, MSTP111, MSTP100, MSTP117,
 
-       MSTP230,
+       MSTP230, MSTP229,
        MSTP222,
        MSTP218, MSTP217, MSTP216, MSTP214,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -474,11 +470,12 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S],   SMSTPCR1, 27, 0), /* CEU20 */
        [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
        [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B],   SMSTPCR1, 17, 0), /* LCDC1 */
-       [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
+       [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */
        [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */
        [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B],   SMSTPCR1,  0, 0), /* LCDC0 */
 
        [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
+       [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 29, 0), /* INTCA */
        [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
        [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 18, 0), /* DMAC1 */
        [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 17, 0), /* DMAC2 */
@@ -575,6 +572,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh-dma-engine.0",        &mstp_clks[MSTP218]),
        CLKDEV_DEV_ID("sh-sci.7",               &mstp_clks[MSTP222]),
        CLKDEV_DEV_ID("e6cd0000.serial",        &mstp_clks[MSTP222]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.0",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.1",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.2",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.3",  &mstp_clks[MSTP229]),
        CLKDEV_DEV_ID("sh-sci.6",               &mstp_clks[MSTP230]),
        CLKDEV_DEV_ID("e6cc0000.serial",        &mstp_clks[MSTP230]),
 
index 67980a08a601bfee89430fa48be2141ab3d82bb5..e8510c35558c11637d35e08d78690fbe87eff7c6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 /*
index c51f9db3f66fb2f9408094cc112b52c3d716d534..fa8ab2cc91878af5f568b58521b62d3b9340a96e 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/bitops.h>
 #include <linux/init.h>
index 126ddafad5265dc62793fd6e7f25aea16b7c42e1..f9bbc5f0a9a12a8f61510e8d72df280149548c33 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/init.h>
 #include <linux/io.h>
@@ -68,7 +64,7 @@
 
 #define SDCKCR         0xE6150074
 #define SD2CKCR                0xE6150078
-#define SD3CKCR                0xE615007C
+#define SD3CKCR                0xE615026C
 #define MMC0CKCR       0xE6150240
 #define MMC1CKCR       0xE6150244
 #define SSPCKCR                0xE6150248
index 453b23129cfa0cd3903e7ebdc3aaef8009bb3542..82143ca3bae9ab7b72877c407425010bbcac51aa 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/init.h>
 #include <linux/io.h>
index 7071676145c497ae911fd7199b721692c71c2b8e..3bc92f46060e9ce55c793ea6104b6214d84b8bb7 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
index 02a6f45a0b9e1c832d5c5d6bfcb79395a42363a2..6b4c1f313cc987c15ab0028b0f82ed4cde98a0fb 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
index 806f94038cc49a8421f85d305a018113a59009b8..1f81ad747153b6bd0ae90b9bc48ecb7a6200b65c 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index f2e79f2376e19f6f0a227af94bf2e4fc3be76409..e329ccbd0a6734f1e1c477f63ec878779cbce3d5 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index f45dde701d7b598c0bc7f2544ea438b0ea19327e..69df8bfac1672202073d5096631e1897857f5a5e 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
  */
 
 #include <linux/linkage.h>
index e2af00b1bd9dc465eefec06ad84398b41dda1657..1ccf49cb485fd96ae5f96c5d2f85d06f8dc7d02e 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 44457a94897b28708c83cbb12a394ab857b860f1..9e3618028accea72442a4ca8fc1a09b36f267dd2 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index f369b4b0863d0bb38a2705e7c2fc12e83438c340..ca7805ad7ea3d64ea24c487f1ff1c5981e0f6e7f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #ifndef __ASM_R8A7740_H__
index f4076a50e970a357c8c9dfbbbc07d4f6d61ada25..9086dfc6746af9376c70cc9804e37c127bc06928 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #ifndef __ASM_R8A7778_H__
 #define __ASM_R8A7778_H__
index b06a9e8f59a5fb7a232f00a747e1715bc23b5a71..aad97be9cbe1b0fabe069136e2e264cc51de8a24 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 4122104359f98d909ae03a923256b479972ff3e0..171174777b6f8d3fa447cd183eafbf3d3b9ff291 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/kernel.h>
index 53f40b70680de9087fa90c0e61ae2711ac8a0fc7..b88b88a40a3c6319686d1db1a292a63d9cba6e8c 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/irq.h>
 #include <linux/kernel.h>
index 8894e1b7ab0e65bbf66975ac36ca9b2b5917604f..fe15dd26d15dc3ec5a33a94528b066595849eadb 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
index 85fe016d6a872a6233effacc6118f2195d8cdbc8..8ec784fc6b266443bfd32881d06134dde919dc16 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/kernel.h>
index 136078ab9407cc2d2f31b43b213c105e6bbaaa4e..d08e75cceaab3ead2a84eb294bfa08ddcbd6d409 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 877fdeb985d0c240e52eea158eeb16edd1b2ef68..ec7d97dca4de2108cbb3a7bbb3ee5b52dd0d8f95 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/irq.h>
index 35d78639244fd805d08d2f44954a5f889379ff6c..d930925f8f1a1ee8837316f5ab30de3f0839cace 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/irq.h>
index 42d5b43089235375e1a1d75b440a32d8940ac1ed..a669377aea5798194e65451aaf2c51e99d36e808 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <linux/clk/shmobile.h>
index d646c8d12423a600332f5e5876f75445426fdca3..e81c38538e13f12e639e0ffa47a140cd21f4e1f7 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index b7bd8e50966879608cde0e5c152cefb12185d9f1..f2e01dc22a08b75545c4482fdbe803cb42d3b554 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -26,6 +22,7 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/input.h>
+#include <linux/i2c/i2c-sh_mobile.h>
 #include <linux/io.h>
 #include <linux/serial_sci.h>
 #include <linux/sh_dma.h>
@@ -192,11 +189,18 @@ static struct resource i2c4_resources[] = {
        },
 };
 
+static struct i2c_sh_mobile_platform_data i2c_platform_data = {
+       .clks_per_count = 2,
+};
+
 static struct platform_device i2c0_device = {
        .name           = "i2c-sh_mobile",
        .id             = 0,
        .resource       = i2c0_resources,
        .num_resources  = ARRAY_SIZE(i2c0_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c1_device = {
@@ -204,6 +208,9 @@ static struct platform_device i2c1_device = {
        .id             = 1,
        .resource       = i2c1_resources,
        .num_resources  = ARRAY_SIZE(i2c1_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c2_device = {
@@ -211,6 +218,9 @@ static struct platform_device i2c2_device = {
        .id             = 2,
        .resource       = i2c2_resources,
        .num_resources  = ARRAY_SIZE(i2c2_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c3_device = {
@@ -218,6 +228,9 @@ static struct platform_device i2c3_device = {
        .id             = 3,
        .resource       = i2c3_resources,
        .num_resources  = ARRAY_SIZE(i2c3_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c4_device = {
@@ -225,6 +238,9 @@ static struct platform_device i2c4_device = {
        .id             = 4,
        .resource       = i2c4_resources,
        .num_resources  = ARRAY_SIZE(i2c4_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
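
One detail worth noting in the sh73a0 hunk above: a single static i2c_sh_mobile_platform_data is shared by all five controllers. platform_device only stores the pointer (dev.platform_data), so sharing one struct is safe as long as the driver treats it as read-only -- which a fixed configuration value like clks_per_count is. A kernel-style sketch of the same pattern in isolation (the "example" names are invented):

#include <linux/platform_device.h>

struct example_pdata { int clks_per_count; };

/* One read-only platform_data instance, shared by pointer. */
static struct example_pdata shared_pdata = {
	.clks_per_count = 2,
};

static struct platform_device example0 = {
	.name = "example-ctrl",
	.id   = 0,
	.dev  = { .platform_data = &shared_pdata },
};

static struct platform_device example1 = {
	.name = "example-ctrl",
	.id   = 1,
	.dev  = { .platform_data = &shared_pdata },
};

/* Each device would then be registered as usual, e.g.
 * platform_device_register(&example0); */
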
index 9782862899e81f1895ae0f1312cdfa5623d58914..146b8de16432fe3ca573243ded57e8dc8208218d 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
  */
 
 #include <linux/linkage.h>
index 6ff1df1df9a752c313514284a09a0e747e906c4c..baff3b5efed8c31b5e1c5cd18e21e4348dcc1360 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 3100e355c3fde4e589aa015a50a68e7b1f57da85..3f761f8390430d61fa7753511412d06827eaf284 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 22d8f87b23e9006b206a3c04fe9720a384926c81..c16dbfe9836c527de5116c5620434ffb1ced2ef6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 87c6be1e79bd989d24c64ccfc4a95342be5ddc4d..1081b763e0f390d627c2ce7f561a7533bc19872b 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 #include <linux/platform_device.h>
 #include <linux/clocksource.h>
index da7be13aecce3cd8d12de9b64c10b6e3facf252b..ab95f5391a2b631e5cace17bbb176766e7d410bf 100644 (file)
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
 
 static void tegra_mask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
 }
 
 static void tegra_unmask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
 }
 
 static void tegra_ack(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static void tegra_eoi(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static int tegra_retrigger(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return 0;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
 
        return 1;
 }
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
 #ifdef CONFIG_PM_SLEEP
 static int tegra_set_wake(struct irq_data *d, unsigned int enable)
 {
-       u32 irq = d->irq;
+       u32 irq = d->hwirq;
        u32 index, mask;
 
        if (irq < FIRST_LEGACY_IRQ ||
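
The mechanical d->irq to d->hwirq substitution above matters because the two number spaces differ: d->irq is the virtual number Linux assigns from a global pool, while d->hwirq is the line's index on this interrupt controller. The FIRST_LEGACY_IRQ guard and the register arithmetic in tegra_irq_write_mask() are only meaningful in hardware-number space, so with sparse virq allocation the old d->irq tests could fire on the wrong line. The standalone model below is illustrative: the FIRST_LEGACY_IRQ value and the register math are assumptions patterned on the code above.

#include <stdio.h>

#define FIRST_LEGACY_IRQ 32	/* assumed value, for illustration */

/* Model of struct irq_data's two numbering spaces. */
struct irq_data_model {
	unsigned int irq;	/* Linux virq: allocator-assigned, arbitrary */
	unsigned int hwirq;	/* controller-local line number */
};

static void mask_line(const struct irq_data_model *d)
{
	if (d->hwirq < FIRST_LEGACY_IRQ)	/* lines the GIC owns: not ours */
		return;
	printf("mask hwirq %u (reg %u, bit %u)\n", d->hwirq,
	       (d->hwirq - FIRST_LEGACY_IRQ) / 32,
	       (d->hwirq - FIRST_LEGACY_IRQ) % 32);
}

int main(void)
{
	/* With sparse allocation the same line can get any virq, so
	 * testing d->irq against FIRST_LEGACY_IRQ would be meaningless: */
	struct irq_data_model d = { .irq = 17, .hwirq = 64 };
	mask_line(&d);
	return 0;
}
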
index ae69809a9e479bef7617e41627b082452c93d09e..7eb94e6fc37672f82c115c16926832a2089329c4 100644 (file)
@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
 
 config KUSER_HELPERS
        bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+       depends on MMU
        default y
        help
          Warning: disabling this option may break user programs.
index b3a947863ac7bb7e38d47b7a640d698b55a34bbc..22ac2a6fbfe373b432f43b1041ca9cf42e189837 100644 (file)
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
 /* Auxiliary Debug Modes Control 1 Register */
 #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
 #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
 #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
 
 /* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
        /* Auxiliary Debug Modes Control 1 Register */
        mrc     p15, 1, r0, c15, c1, 1
        orr     r0, r0, #PJ4B_CLEAN_LINE
-       orr     r0, r0, #PJ4B_BCK_OFF_STREX
        orr     r0, r0, #PJ4B_INTER_PARITY
        bic     r0, r0, #PJ4B_STATIC_BP
        mcr     p15, 1, r0, c15, c1, 1
index 23259f104c66fd367d4663cbd4adafd240ffa50d..afa2b3c4df4a267e5a609c13e6e7d6461be85616 100644 (file)
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
-       mrc     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
-       mcr     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
 ENDPROC(cpu_xscale_do_resume)
index b61a3bcc2fa83bb028933edc80d934e5d1222386..e048f6198d68d69449c642c71e45db6022b4e4e9 100644 (file)
@@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 #define orion_gpio_dbg_show NULL
 #endif
 
+static void orion_gpio_unmask_irq(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct irq_chip_type *ct = irq_data_get_chip_type(d);
+       u32 reg_val;
+       u32 mask = d->mask;
+
+       irq_gc_lock(gc);
+       reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
+       reg_val |= mask;
+       irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
+       irq_gc_unlock(gc);
+}
+
+static void orion_gpio_mask_irq(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct irq_chip_type *ct = irq_data_get_chip_type(d);
+       u32 mask = d->mask;
+       u32 reg_val;
+
+       irq_gc_lock(gc);
+       reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
+       reg_val &= ~mask;
+       irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
+       irq_gc_unlock(gc);
+}
+
 void __init orion_gpio_init(struct device_node *np,
                            int gpio_base, int ngpio,
                            void __iomem *base, int mask_offset,
@@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np,
        ct = gc->chip_types;
        ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
        ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
-       ct->chip.irq_mask = irq_gc_mask_clr_bit;
-       ct->chip.irq_unmask = irq_gc_mask_set_bit;
+       ct->chip.irq_mask = orion_gpio_mask_irq;
+       ct->chip.irq_unmask = orion_gpio_unmask_irq;
        ct->chip.irq_set_type = gpio_irq_set_type;
        ct->chip.name = ochip->chip.label;
 
@@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np,
        ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
        ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
        ct->chip.irq_ack = irq_gc_ack_clr_bit;
-       ct->chip.irq_mask = irq_gc_mask_clr_bit;
-       ct->chip.irq_unmask = irq_gc_mask_set_bit;
+       ct->chip.irq_mask = orion_gpio_mask_irq;
+       ct->chip.irq_unmask = orion_gpio_unmask_irq;
        ct->chip.irq_set_type = gpio_irq_set_type;
        ct->handler = handle_edge_irq;
        ct->chip.name = ochip->chip.label;
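
Dropping irq_gc_mask_clr_bit/irq_gc_mask_set_bit for open-coded read-modify-write matters because the two chip types registered above program different mask registers (the level mask vs. the edge mask) while the generic helpers funnel both through one cached mask value. The approximate shape of the helper being replaced (a sketch, not the verbatim kernel implementation):

	#include <linux/irq.h>

	/* Sketch: the cached mask is shared across the generic chip's
	 * chip types, so bits cached on behalf of one type's register
	 * can be written back into the other's. */
	static void generic_mask_set_bit_sketch(struct irq_data *d)
	{
		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
		struct irq_chip_type *ct = irq_data_get_chip_type(d);

		irq_gc_lock(gc);
		*ct->mask_cache |= d->mask;
		irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
		irq_gc_unlock(gc);
	}

The open-coded versions above read back the register itself under the lock, so each mask register stays self-consistent.
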
index 295c72d52a1f7206f00a29305328f4f19a5f0705..f1ad9c2ab2e917197f3a3a00d7b8b8499138e933 100644 (file)
                        compatible = "apm,xgene-enet";
                        status = "disabled";
                        reg = <0x0 0x17020000 0x0 0xd100>,
-                             <0x0 0X17030000 0x0 0X400>,
+                             <0x0 0X17030000 0x0 0Xc300>,
                              <0x0 0X10000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0x3c 0x4>;
                sgenet0: ethernet@1f210000 {
                        compatible = "apm,xgene-enet";
                        status = "disabled";
-                       reg = <0x0 0x1f210000 0x0 0x10000>,
-                             <0x0 0x1f200000 0x0 0X10000>,
-                             <0x0 0x1B000000 0x0 0X20000>;
+                       reg = <0x0 0x1f210000 0x0 0xd100>,
+                             <0x0 0x1f200000 0x0 0Xc300>,
+                             <0x0 0x1B000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0xA0 0x4>;
                        dma-coherent;
                        compatible = "apm,xgene-enet";
                        status = "disabled";
                        reg = <0x0 0x1f610000 0x0 0xd100>,
-                             <0x0 0x1f600000 0x0 0X400>,
+                             <0x0 0x1f600000 0x0 0Xc300>,
                              <0x0 0x18000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0x60 0x4>;
index 4ce602c2c6de8583697a6b07034a5b7b8432bbda..dd301be89ecccfb6fe9f1d7c02a69bafcfea1559 100644 (file)
@@ -35,6 +35,9 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
@@ -52,6 +55,7 @@ CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
+CONFIG_BPF_JIT=y
 # CONFIG_WIRELESS is not set
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
@@ -65,16 +69,17 @@ CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_AHCI_XGENE=y
-CONFIG_PHY_XGENE=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
-CONFIG_NET_XGENE=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_SERPORT is not set
@@ -87,6 +92,11 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
+# CONFIG_HMC_DRV is not set
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -97,13 +107,25 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_ULPI=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_XGENE=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PHY_XGENE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
index ccc7087d3c4ea7451ac3734a395b6d6607bbdd2b..a62cd077457b933641d4f016c255e297b332c3bc 100644 (file)
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
  *  virt_addr_valid(k) indicates whether a virtual address is valid
  */
-#define ARCH_PFN_OFFSET                PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET                ((unsigned long)PHYS_PFN_OFFSET)
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define        virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
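
The cast leaves the value alone and changes only the type; a plausible reading (not stated in the hunk) is that generic mm code expects pfn expressions to be unsigned long, and a u64-typed constant trips strict type checks such as the kernel's min()/max(). A standalone illustration of the type effect, with a hypothetical PHYS_PFN_OFFSET value:

	#include <stdio.h>

	typedef unsigned long long u64;

	#define PHYS_PFN_OFFSET	((u64)0x80000000ULL >> 12)	/* hypothetical */
	#define ARCH_PFN_OFFSET	((unsigned long)PHYS_PFN_OFFSET)

	int main(void)
	{
		unsigned long pfn = 0x90000;

		/* _Generic stands in for the kernel's type-strict macros */
		printf("uncast is unsigned long? %d\n",
		       _Generic(PHYS_PFN_OFFSET, unsigned long: 1, default: 0));
		printf("cast is unsigned long?   %d\n",
		       _Generic(ARCH_PFN_OFFSET, unsigned long: 1, default: 0));
		printf("pfn - ARCH_PFN_OFFSET = %lu\n", pfn - ARCH_PFN_OFFSET);
		return 0;
	}
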
index da1f06b535e38a97bbd981ade5c414d1131ef4e8..9dfdac4a74a17cd01ba21a726166d3e8a803e325 100644 (file)
@@ -792,3 +792,5 @@ __SYSCALL(__NR_renameat2, sys_renameat2)
 __SYSCALL(__NR_getrandom, sys_getrandom)
 #define __NR_memfd_create 385
 __SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 386
+__SYSCALL(__NR_bpf, sys_bpf)
index 619b1dd7bcdea70e184daacbfb5dd7797c8460c0..d18a449409683cb8f87e7998259ea4141d3d0eca 100644 (file)
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
        b.eq    efi_load_fail
 
        /*
-        * efi_entry() will have relocated the kernel image if necessary
-        * and we return here with device tree address in x0 and the kernel
-        * entry point stored at *image_addr. Save those values in registers
-        * which are callee preserved.
+        * efi_entry() will have copied the kernel image if necessary and we
+        * return here with device tree address in x0 and the kernel entry
+        * point stored at *image_addr. Save those values in registers which
+        * are callee preserved.
         */
        mov     x20, x0         // DTB address
        ldr     x0, [sp, #16]   // relocated _text address
        mov     x21, x0
 
        /*
-        * Flush dcache covering current runtime addresses
-        * of kernel text/data. Then flush all of icache.
+        * Calculate size of the kernel Image (same for original and copy).
         */
        adrp    x1, _text
        add     x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
        add     x2, x2, #:lo12:_edata
        sub     x1, x2, x1
 
+       /*
+        * Flush the copied Image to the PoC, and ensure it is not shadowed by
+        * stale icache entries from before relocation.
+        */
        bl      __flush_dcache_area
        ic      ialluis
 
+       /*
+        * Ensure that the rest of this function (in the original Image) is
+        * visible when the caches are disabled. The I-cache can't have stale
+        * entries for the VA range of the current image, so no maintenance is
+        * necessary.
+        */
+       adr     x0, efi_stub_entry
+       adr     x1, efi_stub_entry_end
+       sub     x1, x1, x0
+       bl      __flush_dcache_area
+
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
        ldp     x29, x30, [sp], #32
        ret
 
+efi_stub_entry_end:
 ENDPROC(efi_stub_entry)
index e007714ded04e6a0854657b0cc39f244e06de18c..8cd27fedc8b6a14caa41fac458549695d4ecb7f9 100644 (file)
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
                 * which ends with "dsb; isb" pair guaranteeing global
                 * visibility.
                 */
-               atomic_set(&pp->cpu_count, -1);
+               /* Notify other processors with an additional increment. */
+               atomic_inc(&pp->cpu_count);
        } else {
-               while (atomic_read(&pp->cpu_count) != -1)
+               while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                        cpu_relax();
                isb();
        }
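
The rendezvous now signals completion with a second increment rather than atomic_set(&pp->cpu_count, -1); atomic_set() is a plain store, not a read-modify-write, so a concurrent atomic_inc() from a late-arriving CPU could clobber or be clobbered by the flag value, wedging the waiters. The resulting protocol as a kernel-context sketch (do_the_patching is a hypothetical stand-in):

	#include <linux/atomic.h>
	#include <linux/cpumask.h>

	extern void do_the_patching(void);	/* hypothetical stand-in */

	static int patch_rendezvous_sketch(atomic_t *count)
	{
		if (atomic_inc_return(count) == 1) {
			/* first CPU in does the work */
			do_the_patching();
			atomic_inc(count);	/* count > num_online_cpus(): done */
		} else {
			while (atomic_read(count) <= num_online_cpus())
				cpu_relax();
			/* followed by isb() on arm64 to resync the pipeline */
		}
		return 0;
	}
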
index 866c1c821860d9689dc363b73001529ee3b5bec5..663da771580ac21a82faae5f5062fa6361e1f794 100644 (file)
@@ -528,7 +528,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
        if (WARN_ON_ONCE(!index))
                return -EINVAL;
 
-       if (state->type == PSCI_POWER_STATE_TYPE_STANDBY)
+       if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
                ret = psci_ops.cpu_suspend(state[index - 1], 0);
        else
                ret = __cpu_suspend(index, psci_suspend_finisher);
index 4cc3b719208e0a8238930d44b409b2f7c2beb9f5..3d7c2df89946cc1d1606a4b3401115f10e44ab71 100644 (file)
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },
+
+       /* ICC_SRE_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
+         trap_raz_wi },
+
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+
+       /* ICC_SRE */
+       { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 };
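
Both additions hide the GICv3 CPU-interface enable register from guests: ICC_SRE (and its AArch32 counterpart) reads as zero and ignores writes, so a guest only ever sees the memory-mapped GICv2-compatible interface. Sketched after the local helpers this file uses, with the exact signature treated as illustrative:

	/* Kernel-context sketch of a reads-as-zero/writes-ignored trap */
	static bool trap_raz_wi_sketch(struct kvm_vcpu *vcpu,
				       const struct sys_reg_params *p,
				       const struct sys_reg_desc *r)
	{
		if (p->is_write)
			return ignore_write(vcpu, p);	/* value discarded */
		else
			return read_zero(vcpu, p);	/* Rt := 0 */
	}
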
 
index 6e0ed93d51fe1850ee27176fc9222dac5416b74d..c17967fdf5f6007330ba65beb7a05bd347600764 100644 (file)
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2   )
        sub     x1, x1, #2
 4:     adds    x1, x1, #1
        b.mi    5f
-       strb    wzr, [x0]
+USER(9f, strb  wzr, [x0]       )
 5:     mov     x0, #0
        ret
 ENDPROC(__clear_user)
index 0bf90d26e7455daf38bc5710a1d01222543cda7f..f4f8b500f74c634ce62d36f25259e26c7826d2b7 100644 (file)
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-                                 unsigned long end, unsigned long phys,
+                                 unsigned long end, phys_addr_t phys,
                                  int map_io)
 {
        pud_t *pud;
index ec6b9acb6bea8733a5f17bd7afe4a0a249ff4de9..dbe46f43884df183a69a2da387a941f55dbb371d 100644 (file)
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
        for (i = 0; i < npages; i++) {
                pfn = gfn_to_pfn(kvm, base_gfn + i);
-               if (!kvm_is_mmio_pfn(pfn)) {
+               if (!kvm_is_reserved_pfn(pfn)) {
                        kvm_set_pmt_entry(kvm, base_gfn + i,
                                        pfn << PAGE_SHIFT,
                                _PAGE_AR_RWX | _PAGE_MA_WB);
index 4ef7a54813e6b72d88a0d40109a26347b91aa7c6..75e75d7b1702fb6434c59e9155dbba1d71623b17 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            354
+#define NR_syscalls            355
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b419c6b7ac3739b150000dfd13aff28d3dcc372a..2c1bec9a14b67da42a8ed09b644373d0cf35b5ef 100644 (file)
 #define __NR_renameat2         351
 #define __NR_getrandom         352
 #define __NR_memfd_create      353
+#define __NR_bpf               354
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 05b46c2b08b8d8d4be26c56e8be4d245cef3dab6..2ca219e184cd16e6ebad1bd9123061695691c843 100644 (file)
@@ -374,4 +374,5 @@ ENTRY(sys_call_table)
        .long sys_renameat2
        .long sys_getrandom
        .long sys_memfd_create
+       .long sys_bpf
 
index f43aa536c517437bc6778d6adb1d9e0212effc8c..9536ef912f594651be7e403264f3eb30c3355384 100644 (file)
@@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool 64BIT_PHYS_ADDR
 
+choice
+       prompt "SmartMIPS or microMIPS ASE support"
+
+config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
+       bool "None"
+       help
+         Select this if you want neither microMIPS nor SmartMIPS support
+
 config CPU_HAS_SMARTMIPS
        depends on SYS_SUPPORTS_SMARTMIPS
-       bool "Support for the SmartMIPS ASE"
+       bool "SmartMIPS"
        help
          SmartMIPS is an extension of the MIPS32 architecture aimed at
          increased security at both hardware and software level for
@@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
 
 config CPU_MICROMIPS
        depends on SYS_SUPPORTS_MICROMIPS
-       bool "Build kernel using microMIPS ISA"
+       bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
          microMIPS ISA
 
+endchoice
+
 config CPU_HAS_MSA
        bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
        depends on CPU_SUPPORTS_MSA
index 23cb94806fbc5d0f370110cc0da8936f54b80719..58076472bdd8e85eb8eaba14e8e3c21b6c4dfdc1 100644 (file)
@@ -93,6 +93,15 @@ LDFLAGS_vmlinux                      += -G 0 -static -n -nostdlib
 KBUILD_AFLAGS_MODULE           += -mlong-calls
 KBUILD_CFLAGS_MODULE           += -mlong-calls
 
+#
+# pass -msoft-float to GAS if it supports it.  However on newer binutils
+# (specifically newer than 2.24.51.20140728) we then also need to explicitly
+# set ".set hardfloat" in all files which manipulate floating point registers.
+#
+ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
+       cflags-y                += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
+endif
+
 cflags-y += -ffreestanding
 
 #
index 7417340496756d30d159aae9b48e4af0f8edf531..2bc4aa95944e462d84673bb974e2dde119fb6bdf 100644 (file)
@@ -809,6 +809,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
 #ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
        .flags = IRQCHIP_SET_TYPE_MASKED,
 };
@@ -823,6 +824,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
        .irq_set_type = octeon_irq_ciu_gpio_set_type,
 #ifdef CONFIG_SMP
        .irq_set_affinity = octeon_irq_ciu_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
        .flags = IRQCHIP_SET_TYPE_MASKED,
 };
index e38c2811d4e23645e962df3b2f27fee3cdfb9804..cdac7b3eeaf7fa6524b8f6ffb92edc7209eef4c5 100644 (file)
@@ -13,6 +13,8 @@
 #include <asm/mipsregs.h>
 
        .macro  fpu_save_single thread tmp=t0
+       .set push
+       SET_HARDFLOAT
        cfc1    \tmp,  fcr31
        swc1    $f0,  THREAD_FPR0_LS64(\thread)
        swc1    $f1,  THREAD_FPR1_LS64(\thread)
        swc1    $f30, THREAD_FPR30_LS64(\thread)
        swc1    $f31, THREAD_FPR31_LS64(\thread)
        sw      \tmp, THREAD_FCR31(\thread)
+       .set pop
        .endm
 
        .macro  fpu_restore_single thread tmp=t0
+       .set push
+       SET_HARDFLOAT
        lw      \tmp, THREAD_FCR31(\thread)
        lwc1    $f0,  THREAD_FPR0_LS64(\thread)
        lwc1    $f1,  THREAD_FPR1_LS64(\thread)
@@ -84,6 +89,7 @@
        lwc1    $f30, THREAD_FPR30_LS64(\thread)
        lwc1    $f31, THREAD_FPR31_LS64(\thread)
        ctc1    \tmp, fcr31
+       .set pop
        .endm
 
        .macro  cpu_save_nonscratch thread
index cd9a98bc8f606197a15b70d729aa66a0431f3e0e..6caf8766b80f161ea7a620ecc33cca7768e1d4b0 100644 (file)
@@ -57,6 +57,8 @@
 #endif /* CONFIG_CPU_MIPSR2 */
 
        .macro  fpu_save_16even thread tmp=t0
+       .set    push
+       SET_HARDFLOAT
        cfc1    \tmp, fcr31
        sdc1    $f0,  THREAD_FPR0_LS64(\thread)
        sdc1    $f2,  THREAD_FPR2_LS64(\thread)
        sdc1    $f28, THREAD_FPR28_LS64(\thread)
        sdc1    $f30, THREAD_FPR30_LS64(\thread)
        sw      \tmp, THREAD_FCR31(\thread)
+       .set    pop
        .endm
 
        .macro  fpu_save_16odd thread
        .set    push
        .set    mips64r2
+       SET_HARDFLOAT
        sdc1    $f1,  THREAD_FPR1_LS64(\thread)
        sdc1    $f3,  THREAD_FPR3_LS64(\thread)
        sdc1    $f5,  THREAD_FPR5_LS64(\thread)
        .endm
 
        .macro  fpu_restore_16even thread tmp=t0
+       .set    push
+       SET_HARDFLOAT
        lw      \tmp, THREAD_FCR31(\thread)
        ldc1    $f0,  THREAD_FPR0_LS64(\thread)
        ldc1    $f2,  THREAD_FPR2_LS64(\thread)
        .macro  fpu_restore_16odd thread
        .set    push
        .set    mips64r2
+       SET_HARDFLOAT
        ldc1    $f1,  THREAD_FPR1_LS64(\thread)
        ldc1    $f3,  THREAD_FPR3_LS64(\thread)
        ldc1    $f5,  THREAD_FPR5_LS64(\thread)
        .macro  cfcmsa  rd, cs
        .set    push
        .set    noat
+       SET_HARDFLOAT
        .insn
        .word   CFC_MSA_INSN | (\cs << 11)
        move    \rd, $1
        .macro  ctcmsa  cd, rs
        .set    push
        .set    noat
+       SET_HARDFLOAT
        move    $1, \rs
        .word   CTC_MSA_INSN | (\cd << 6)
        .set    pop
        .macro  ld_d    wd, off, base
        .set    push
        .set    noat
+       SET_HARDFLOAT
        add     $1, \base, \off
        .word   LDD_MSA_INSN | (\wd << 6)
        .set    pop
        .macro  st_d    wd, off, base
        .set    push
        .set    noat
+       SET_HARDFLOAT
        add     $1, \base, \off
        .word   STD_MSA_INSN | (\wd << 6)
        .set    pop
        .macro  copy_u_w        rd, ws, n
        .set    push
        .set    noat
+       SET_HARDFLOAT
        .insn
        .word   COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
        /* move triggers an assembler bug... */
        .macro  copy_u_d        rd, ws, n
        .set    push
        .set    noat
+       SET_HARDFLOAT
        .insn
        .word   COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
        /* move triggers an assembler bug... */
        .macro  insert_w        wd, n, rs
        .set    push
        .set    noat
+       SET_HARDFLOAT
        /* move triggers an assembler bug... */
        or      $1, \rs, zero
        .word   INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
        .macro  insert_d        wd, n, rs
        .set    push
        .set    noat
+       SET_HARDFLOAT
        /* move triggers an assembler bug... */
        or      $1, \rs, zero
        .word   INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
        st_d    31, THREAD_FPR31, \thread
        .set    push
        .set    noat
+       SET_HARDFLOAT
        cfcmsa  $1, MSA_CSR
        sw      $1, THREAD_MSA_CSR(\thread)
        .set    pop
        .macro  msa_restore_all thread
        .set    push
        .set    noat
+       SET_HARDFLOAT
        lw      $1, THREAD_MSA_CSR(\thread)
        ctcmsa  MSA_CSR, $1
        .set    pop
        .macro  msa_init_all_upper
        .set    push
        .set    noat
+       SET_HARDFLOAT
        not     $1, zero
        msa_init_upper  0
        .set    pop
index 429481f9028dd08119e9961ab8669c04181a6ad3..f184ba088532635068f1944f858c249675dbd991 100644 (file)
 
 #include <asm/sgidefs.h>
 
+/*
+ * starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
+ * hardfloat and softfloat object files.  The kernel build uses soft-float by
+ * default, so we also need to pass -msoft-float along to GAS if it supports it.
+ * But this in turn causes assembler errors in files which access hardfloat
+ * registers.  We detect if GAS supports "-msoft-float" in the Makefile and
+ * explicitly put ".set hardfloat" where floating point registers are touched.
+ */
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define SET_HARDFLOAT .set hardfloat
+#else
+#define SET_HARDFLOAT
+#endif
+
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
index 4d0aeda6839741ea27f956e9049958cdab297b56..dd562414cd5effc97f834da299c052d2922e325a 100644 (file)
@@ -145,8 +145,8 @@ static inline void lose_fpu(int save)
        if (is_msa_enabled()) {
                if (save) {
                        save_msa(current);
-                       asm volatile("cfc1 %0, $31"
-                               : "=r"(current->thread.fpu.fcr31));
+                       current->thread.fpu.fcr31 =
+                                       read_32bit_cp1_register(CP1_STATUS);
                }
                disable_msa();
                clear_thread_flag(TIF_USEDMSA);
index e194f957ca8c42a0af82f0621c425308a25e4d53..fdbff44e5482cd20acc5efc798091894a3292c4f 100644 (file)
 #define WORD_INSN ".word"
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define NOP_INSN "nop32"
+#else
+#define NOP_INSN "nop"
+#endif
+
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm_volatile_goto("1:\tnop\n\t"
+       asm_volatile_goto("1:\t" NOP_INSN "\n\t"
                "nop\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
index 7d28f95b0512ea27e1aeb7fac6f22ecc561a45fd..6d69332f21ecd53e09eebfba0de74f1c9fc1daa6 100644 (file)
 #define cpu_has_mcheck         0
 #define cpu_has_mdmx           0
 #define cpu_has_mips16         0
-#define cpu_has_mips32r1       0
 #define cpu_has_mips32r2       0
 #define cpu_has_mips3d         0
-#define cpu_has_mips64r1       0
 #define cpu_has_mips64r2       0
 #define cpu_has_mipsmt         0
 #define cpu_has_prefetch       0
index cf3b580c3df6de0c09e6a47f05f670ae79b94666..22a135ac91de3830e885342b834feb47ab109eff 100644 (file)
 #define MIPS_CONF6_SYND                (_ULCAST_(1) << 13)
 /* proAptiv FTLB on/off bit */
 #define MIPS_CONF6_FTLBEN      (_ULCAST_(1) << 15)
+/* FTLB probability bits */
+#define MIPS_CONF6_FTLBP_SHIFT (16)
 
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
@@ -1324,7 +1326,7 @@ do {                                                                      \
 /*
  * Macros to access the floating point coprocessor control registers
  */
-#define read_32bit_cp1_register(source)                                        \
+#define _read_32bit_cp1_register(source, gas_hardfloat)                        \
 ({                                                                     \
        int __res;                                                      \
                                                                        \
@@ -1334,12 +1336,21 @@ do {                                                                    \
        "       # gas fails to assemble cfc1 for some archs,    \n"     \
        "       # like Octeon.                                  \n"     \
        "       .set    mips1                                   \n"     \
+       "       "STR(gas_hardfloat)"                            \n"     \
        "       cfc1    %0,"STR(source)"                        \n"     \
        "       .set    pop                                     \n"     \
        : "=r" (__res));                                                \
        __res;                                                          \
 })
 
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define read_32bit_cp1_register(source)                                        \
+       _read_32bit_cp1_register(source, .set hardfloat)
+#else
+#define read_32bit_cp1_register(source)                                        \
+       _read_32bit_cp1_register(source, )
+#endif
+
 #ifdef HAVE_AS_DSP
 #define rddsp(mask)                                                    \
 ({                                                                     \
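
With the hardfloat directive threaded through as a macro argument, C code can read FPU control registers even in a soft-float kernel build without open-coding inline asm. A kernel-context usage sketch (CP1_STATUS names $31, the FP control/status register):

	#include <asm/mipsregs.h>

	static inline unsigned int current_fcr31(void)
	{
		return read_32bit_cp1_register(CP1_STATUS);
	}

Later hunks in this series convert exactly such open-coded cfc1 sequences to this macro.
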
index 4520adc8699b9c00835a4340c8da217e90dcc305..cd6e0afc683366e598eadbaf8c572e0434fdb9bf 100644 (file)
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+#ifdef CONFIG_EVA
+       protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#else
        protected_cache_op(Hit_Writeback_Inv_D, addr);
+#endif
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
index a10951090234073cae8aa6ea9d8b5542faef6a22..22a5624e2fd2dcecf4b5592097e0c18620012426 100644 (file)
@@ -301,7 +301,8 @@ do {                                                                        \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
-       }                                                               \
+       } else                                                          \
+               (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
 })
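
Both fixups close the same information leak: on a faulting or rejected access, __get_user previously left the destination holding whatever was already there, so an unchecked caller could propagate stale kernel data. After the change the exception path zeroes the output register and "(x) = 0" covers the access_ok() failure. The caller-visible contract, as a kernel-context sketch:

	#include <linux/uaccess.h>

	/* Even when the copy faults, *value is deterministic (zero). */
	static long read_user_word(const int __user *uptr, int *value)
	{
		int tmp;
		long err = get_user(tmp, uptr);	/* tmp == 0 on -EFAULT */

		*value = tmp;
		return err;
	}
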
@@ -316,6 +317,7 @@ do {                                                                        \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
+       "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
@@ -630,6 +632,7 @@ do {                                                                        \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
+       "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
@@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void);
        "jal\t" #destination "\n\t"
 #endif
 
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-#define DADDI_SCRATCH "$0"
-#else
+#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&   \
+                                             defined(CONFIG_CPU_HAS_PREFETCH))
 #define DADDI_SCRATCH "$3"
+#else
+#define DADDI_SCRATCH "$0"
 #endif
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
@@ -1418,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
 }
 
 /*
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
  * Context: User context only. This function may sleep.
@@ -1427,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
  *
  * Returns the size of the string INCLUDING the terminating NUL.
  * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
+ * If the string is too long, returns a value greater than @n.
  */
 static inline long strnlen_user(const char __user *s, long n)
 {
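
The corrected comment documents strnlen_user rather than strlen_user and spells out its three outcomes. A kernel-context usage sketch covering all of them:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static long copy_name(char *dst, const char __user *src, long bound)
	{
		long len = strnlen_user(src, bound);

		if (len == 0)
			return -EFAULT;		/* faulted */
		if (len > bound)
			return -ENAMETOOLONG;	/* no NUL within bound */
		/* len includes the terminating NUL */
		return copy_from_user(dst, src, len) ? -EFAULT : 0;
	}
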
index fdb4923777d1347822517b9671b80f378c74d16b..d001bb1ad177e7b6e2df2fbb7e42e894784b2e91 100644 (file)
 #define __NR_seccomp                   (__NR_Linux + 352)
 #define __NR_getrandom                 (__NR_Linux + 353)
 #define __NR_memfd_create              (__NR_Linux + 354)
+#define __NR_bpf                       (__NR_Linux + 355)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            354
+#define __NR_Linux_syscalls            355
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                354
+#define __NR_O32_Linux_syscalls                355
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_seccomp                   (__NR_Linux + 312)
 #define __NR_getrandom                 (__NR_Linux + 313)
 #define __NR_memfd_create              (__NR_Linux + 314)
+#define __NR_bpf                       (__NR_Linux + 315)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            314
+#define __NR_Linux_syscalls            315
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         314
+#define __NR_64_Linux_syscalls         315
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_seccomp                   (__NR_Linux + 316)
 #define __NR_getrandom                 (__NR_Linux + 317)
 #define __NR_memfd_create              (__NR_Linux + 318)
+#define __NR_bpf                       (__NR_Linux + 319)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            318
+#define __NR_Linux_syscalls            319
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                318
+#define __NR_N32_Linux_syscalls                319
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 290c23b516789ba16193f7b72fa61eafbf7907b2..86495072a922f31e0214cad3b6ac71c10aaf5fa8 100644 (file)
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
 END(bmips_reset_nmi_vec)
 
        .set    pop
-       .previous
 
 /***********************************************************************
  * CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
        jr      ra
 
 END(bmips_enable_xks01)
-
-       .previous
index 7b2df224f0414805d8260e4fad30b1d0050f65b5..4d7d99d601cc13219e9d8f9631da6002b3d9df9d 100644 (file)
@@ -144,7 +144,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                case mm_bc1t_op:
                        preempt_disable();
                        if (is_fpu_owner())
-                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                               fcr31 = read_32bit_cp1_register(CP1_STATUS);
                        else
                                fcr31 = current->thread.fpu.fcr31;
                        preempt_enable();
@@ -562,11 +562,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
        case cop1_op:
                preempt_disable();
                if (is_fpu_owner())
-                       asm volatile(
-                               ".set push\n"
-                               "\t.set mips1\n"
-                               "\tcfc1\t%0,$31\n"
-                               "\t.set pop" : "=r" (fcr31));
+                       fcr31 = read_32bit_cp1_register(CP1_STATUS);
                else
                        fcr31 = current->thread.fpu.fcr31;
                preempt_enable();
index e6e97d2a5c9e68cccde81ab0f181184d1e27fd13..0384b05ab5a02413cbcb11a163375029f285255f 100644 (file)
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
 1:     /* Enter VPE configuration state */
index 94c4a0c0a577909849326dd38999d30ef017ab99..dc49cf30c2db46f9e0f2caef74548459c71f6714 100644 (file)
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 static char unknown_isa[] = KERN_ERR \
        "Unsupported ISA type, c0.config0: %d.";
 
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+
+       unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+       /*
+        * 0 = All TLBWR instructions go to FTLB
+        * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+        * FTLB and 1 goes to the VTLB.
+        * 2 = 7:1: As above with 7:1 ratio.
+        * 3 = 3:1: As above with 3:1 ratio.
+        *
+        * Use the linear midpoint as the probability threshold.
+        */
+       if (probability >= 12)
+               return 1;
+       else if (probability >= 6)
+               return 2;
+       else
+               /*
+                * So FTLB is less than 4 times bigger than VTLB.
+                * A 3:1 ratio can still be useful though.
+                */
+               return 3;
+}
+
 static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
        unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
        case CPU_P5600:
                /* proAptiv & related cores use Config6 to enable the FTLB */
                config6 = read_c0_config6();
+               /* Clear the old probability value */
+               config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
                if (enable)
                        /* Enable FTLB */
-                       write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+                       write_c0_config6(config6 |
+                                        (calculate_ftlb_probability(c)
+                                         << MIPS_CONF6_FTLBP_SHIFT)
+                                        | MIPS_CONF6_FTLBEN);
                else
                        /* Disable FTLB */
                        write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
@@ -757,31 +788,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        c->cputype = CPU_LOONGSON2;
                        __cpu_name[cpu] = "ICT Loongson-2";
                        set_elf_platform(cpu, "loongson2e");
+                       set_isa(c, MIPS_CPU_ISA_III);
                        break;
                case PRID_REV_LOONGSON2F:
                        c->cputype = CPU_LOONGSON2;
                        __cpu_name[cpu] = "ICT Loongson-2";
                        set_elf_platform(cpu, "loongson2f");
+                       set_isa(c, MIPS_CPU_ISA_III);
                        break;
                case PRID_REV_LOONGSON3A:
                        c->cputype = CPU_LOONGSON3;
-                       c->writecombine = _CACHE_UNCACHED_ACCELERATED;
                        __cpu_name[cpu] = "ICT Loongson-3";
                        set_elf_platform(cpu, "loongson3a");
+                       set_isa(c, MIPS_CPU_ISA_M64R1);
                        break;
                case PRID_REV_LOONGSON3B_R1:
                case PRID_REV_LOONGSON3B_R2:
                        c->cputype = CPU_LOONGSON3;
                        __cpu_name[cpu] = "ICT Loongson-3";
                        set_elf_platform(cpu, "loongson3b");
+                       set_isa(c, MIPS_CPU_ISA_M64R1);
                        break;
                }
 
-               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS |
                             MIPS_CPU_FPU | MIPS_CPU_LLSC |
                             MIPS_CPU_32FPR;
                c->tlbsize = 64;
+               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
                break;
        case PRID_IMP_LOONGSON_32:  /* Loongson-1 */
                decode_configs(c);
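
The probability field chosen by calculate_ftlb_probability() above is easy to check by hand. A standalone rework with a hypothetical proAptiv-style configuration (64 VTLB entries plus a 512-entry FTLB): the ratio is 576/64 = 9, which falls in the 7:1 bucket, so the field value is 2.

	#include <stdio.h>

	static unsigned int ftlb_probability(unsigned int tlbsize,
					     unsigned int tlbsizevtlb)
	{
		unsigned int ratio = tlbsize / tlbsizevtlb;

		if (ratio >= 12)
			return 1;	/* 15:1 */
		else if (ratio >= 6)
			return 2;	/* 7:1 */
		else
			return 3;	/* 3:1 */
	}

	int main(void)
	{
		printf("FTLBP field = %u\n", ftlb_probability(64 + 512, 64));
		return 0;
	}
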
index ac35e12cb1f3585d05f4f336489c161b4b17a950..a5e26dd9059256ed7c2a2033210bcd1bbc99b6e3 100644 (file)
@@ -358,6 +358,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        /* gas fails to assemble cfc1 for some archs (octeon).*/ \
        .set    mips1
+       SET_HARDFLOAT
        cfc1    a1, fcr31
        li      a2, ~(0x3f << 12)
        and     a2, a1
index 6001610cfe55f846a403633a13cfa8973a23e07a..dda800e9e73167d3637f972516743b12e25a9ddc 100644 (file)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define J_RANGE_MASK ((1ul << 28) - 1)
+/*
+ * Define parameters for the standard MIPS and the microMIPS jump
+ * instruction encoding respectively:
+ *
+ * - the ISA bit of the target, either 0 or 1 respectively,
+ *
+ * - the amount the jump target address is shifted right to fit in the
+ *   immediate field of the machine instruction, either 2 or 1,
+ *
+ * - the mask determining the size of the jump region relative to the
+ *   delay-slot instruction, either 256MB or 128MB,
+ *
+ * - the jump target alignment, either 4 or 2 bytes.
+ */
+#define J_ISA_BIT      IS_ENABLED(CONFIG_CPU_MICROMIPS)
+#define J_RANGE_SHIFT  (2 - J_ISA_BIT)
+#define J_RANGE_MASK   ((1ul << (26 + J_RANGE_SHIFT)) - 1)
+#define J_ALIGN_MASK   ((1ul << J_RANGE_SHIFT) - 1)
 
 void arch_jump_label_transform(struct jump_entry *e,
                               enum jump_label_type type)
 {
+       union mips_instruction *insn_p;
        union mips_instruction insn;
-       union mips_instruction *insn_p =
-               (union mips_instruction *)(unsigned long)e->code;
 
-       /* Jump only works within a 256MB aligned region. */
-       BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
+       insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
+
+       /* Jump only works within an aligned region its delay slot is in. */
+       BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
 
-       /* Target must have 4 byte alignment. */
-       BUG_ON((e->target & 3) != 0);
+       /* Target must have the right alignment and ISA must be preserved. */
+       BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
        if (type == JUMP_LABEL_ENABLE) {
-               insn.j_format.opcode = j_op;
-               insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
+               insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+               insn.j_format.target = e->target >> J_RANGE_SHIFT;
        } else {
                insn.word = 0; /* nop */
        }
 
        get_online_cpus();
        mutex_lock(&text_mutex);
-       *insn_p = insn;
+       if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+               insn_p->halfword[0] = insn.word >> 16;
+               insn_p->halfword[1] = insn.word;
+       } else
+               *insn_p = insn;
 
        flush_icache_range((unsigned long)insn_p,
                           (unsigned long)insn_p + sizeof(*insn_p));
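
The four parameters in the comment block follow directly from the two jump encodings, and the derivation checks out standalone: classic MIPS shifts the target right by 2 and reaches a 256MB region with 4-byte alignment; microMIPS shifts by 1 and reaches 128MB with 2-byte alignment, the low bit carrying the ISA mode.

	#include <stdio.h>

	int main(void)
	{
		for (int isa_bit = 0; isa_bit <= 1; isa_bit++) {
			unsigned long shift = 2 - isa_bit;
			unsigned long range = (1ul << (26 + shift)) - 1;
			unsigned long align = (1ul << shift) - 1;

			printf("ISA bit %d: region %luMB, align mask %#lx\n",
			       isa_bit, (range + 1) >> 20, align);
		}
		return 0;
	}
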
index f31063dbdaebcffe4521be0d04d045c726ba85aa..5ce3b746cedc0bdea6e305cc72ef2121f0026053 100644 (file)
@@ -28,6 +28,8 @@
        .set    mips1
        /* Save floating point context */
 LEAF(_save_fp_context)
+       .set    push
+       SET_HARDFLOAT
        li      v0, 0                                   # assume success
        cfc1    t1,fcr31
        EX(swc1 $f0,(SC_FPREGS+0)(a0))
@@ -65,6 +67,7 @@ LEAF(_save_fp_context)
        EX(sw   t1,(SC_FPC_CSR)(a0))
        cfc1    t0,$0                           # implementation/version
        jr      ra
+       .set    pop
        .set    nomacro
         EX(sw  t0,(SC_FPC_EIR)(a0))
        .set    macro
@@ -80,6 +83,8 @@ LEAF(_save_fp_context)
  * stack frame which might have been changed by the user.
  */
 LEAF(_restore_fp_context)
+       .set    push
+       SET_HARDFLOAT
        li      v0, 0                                   # assume success
        EX(lw t0,(SC_FPC_CSR)(a0))
        EX(lwc1 $f0,(SC_FPREGS+0)(a0))
@@ -116,6 +121,7 @@ LEAF(_restore_fp_context)
        EX(lwc1 $f31,(SC_FPREGS+248)(a0))
        jr      ra
         ctc1   t0,fcr31
+       .set    pop
        END(_restore_fp_context)
        .set    reorder
 
index 20b7b040e76f1c4e8d9a019edae6014f3ee3fea5..435ea652f5fae7af10c392b884faecdc8ea430cf 100644 (file)
@@ -120,6 +120,9 @@ LEAF(_restore_fp)
 
 #define FPU_DEFAULT  0x00000000
 
+       .set push
+       SET_HARDFLOAT
+
 LEAF(_init_fpu)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU1
@@ -165,3 +168,5 @@ LEAF(_init_fpu)
        mtc1    t0, $f31
        jr      ra
        END(_init_fpu)
+
+       .set pop
index 8352523568e6d7c993a5f42e52d5121c57ad81b8..6c160c67984c014e53a3a068698fdef58a9b9345 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/regdef.h>
 
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
        .macro  EX insn, reg, src
        .set    push
+       SET_HARDFLOAT
        .set    nomacro
 .ex\@: \insn   \reg, \src
        .set    pop
        .set    arch=r4000
 
 LEAF(_save_fp_context)
+       .set    push
+       SET_HARDFLOAT
        cfc1    t1, fcr31
+       .set    pop
 
 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
        .set    push
+       SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
-       .set    mips64r2
+       .set    mips32r2
+       .set    fp=64
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip storing odd if FR=0
@@ -64,6 +73,8 @@ LEAF(_save_fp_context)
 1:     .set    pop
 #endif
 
+       .set push
+       SET_HARDFLOAT
        /* Store the 16 even double precision registers */
        EX      sdc1 $f0, SC_FPREGS+0(a0)
        EX      sdc1 $f2, SC_FPREGS+16(a0)
@@ -84,11 +95,14 @@ LEAF(_save_fp_context)
        EX      sw t1, SC_FPC_CSR(a0)
        jr      ra
         li     v0, 0                                   # success
+       .set pop
        END(_save_fp_context)
 
 #ifdef CONFIG_MIPS32_COMPAT
        /* Save 32-bit process floating point context */
 LEAF(_save_fp_context32)
+       .set push
+       SET_HARDFLOAT
        cfc1    t1, fcr31
 
        mfc0    t0, CP0_STATUS
@@ -134,6 +148,7 @@ LEAF(_save_fp_context32)
        EX      sw t1, SC32_FPC_CSR(a0)
        cfc1    t0, $0                          # implementation/version
        EX      sw t0, SC32_FPC_EIR(a0)
+       .set pop
 
        jr      ra
         li     v0, 0                                   # success
@@ -150,8 +165,10 @@ LEAF(_restore_fp_context)
 
 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
        .set    push
+       SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
-       .set    mips64r2
+       .set    mips32r2
+       .set    fp=64
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip loading odd if FR=0
@@ -175,6 +192,8 @@ LEAF(_restore_fp_context)
        EX      ldc1 $f31, SC_FPREGS+248(a0)
 1:     .set pop
 #endif
+       .set push
+       SET_HARDFLOAT
        EX      ldc1 $f0, SC_FPREGS+0(a0)
        EX      ldc1 $f2, SC_FPREGS+16(a0)
        EX      ldc1 $f4, SC_FPREGS+32(a0)
@@ -192,6 +211,7 @@ LEAF(_restore_fp_context)
        EX      ldc1 $f28, SC_FPREGS+224(a0)
        EX      ldc1 $f30, SC_FPREGS+240(a0)
        ctc1    t1, fcr31
+       .set pop
        jr      ra
         li     v0, 0                                   # success
        END(_restore_fp_context)
@@ -199,6 +219,8 @@ LEAF(_restore_fp_context)
 #ifdef CONFIG_MIPS32_COMPAT
 LEAF(_restore_fp_context32)
        /* Restore an o32 sigcontext.  */
+       .set push
+       SET_HARDFLOAT
        EX      lw t1, SC32_FPC_CSR(a0)
 
        mfc0    t0, CP0_STATUS
@@ -242,6 +264,7 @@ LEAF(_restore_fp_context32)
        ctc1    t1, fcr31
        jr      ra
         li     v0, 0                                   # success
+       .set pop
        END(_restore_fp_context32)
 #endif
 
index 4c4ec1812420b873663d682d706860f394d7db00..64591e671878f41d9c4d0551806783b65578eb23 100644 (file)
@@ -22,6 +22,9 @@
 
 #include <asm/asmmacro.h>
 
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
 /*
  * Offset to the current process status flags, the first 32 bytes of the
  * stack are not used.
        bgtz    a3, 1f
 
        /* Save 128b MSA vector context + scalar FP control & status. */
+       .set push
+       SET_HARDFLOAT
        cfc1    t1, fcr31
        msa_save_all    a0
+       .set pop        /* SET_HARDFLOAT */
+
        sw      t1, THREAD_FCR31(a0)
        b       2f
 
@@ -161,6 +168,9 @@ LEAF(_init_msa_upper)
 
 #define FPU_DEFAULT  0x00000000
 
+       .set push
+       SET_HARDFLOAT
+
 LEAF(_init_fpu)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU1
@@ -232,7 +242,8 @@ LEAF(_init_fpu)
 
 #ifdef CONFIG_CPU_MIPS32_R2
        .set    push
-       .set    mips64r2
+       .set    mips32r2
+       .set    fp=64
        sll     t0, t0, 5                       # is Status.FR set?
        bgez    t0, 1f                          # no: skip setting upper 32b
 
@@ -291,3 +302,5 @@ LEAF(_init_fpu)
 #endif
        jr      ra
        END(_init_fpu)
+
+       .set pop        /* SET_HARDFLOAT */
index da0fbe46d83b040dc54a300d7be5324ce51f1ab3..47077380c15c43aca685a3aacbea837f8cc42a65 100644 (file)
@@ -18,6 +18,9 @@
 
        .set    noreorder
        .set    mips2
+       .set    push
+       SET_HARDFLOAT
+
        /* Save floating point context */
        LEAF(_save_fp_context)
        mfc0    t0,CP0_STATUS
@@ -85,3 +88,5 @@
 1:     jr      ra
         nop
        END(_restore_fp_context)
+
+       .set pop        /* SET_HARDFLOAT */
index 31b1b763cb298841eee156c61752687c4056809d..c5c4fd54d797221256e147a8a0be5278a8806df5 100644 (file)
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
        int ret = 0;
 
        if (index >= RTLX_CHANNELS) {
-               pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
+               pr_debug("rtlx_open index out of range\n");
                return -ENOSYS;
        }
 
        if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
-               pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
+               pr_debug("rtlx_open channel %d already opened\n", index);
                ret = -EBUSY;
                goto out_fail;
        }
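
pr_debug() supplies the KERN_DEBUG level itself, so a level string passed inside the format would have been printed literally. A kernel-context sketch of the corrected idiom:

	#include <linux/printk.h>

	static void log_busy(int index)
	{
		pr_debug("rtlx_open channel %d already opened\n", index);
		/* long form with the same effect:
		 * printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
		 */
	}
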
index 744cd10ba599e260b97e0a53da6d872792f7bd54..00cad1005a16d1fc1925166ec5746e5ac9890154 100644 (file)
@@ -579,3 +579,4 @@ EXPORT(sys_call_table)
        PTR     sys_seccomp
        PTR     sys_getrandom
        PTR     sys_memfd_create
+       PTR     sys_bpf                         /* 4355 */
index 002b1bc09c387c44dcd5ac4157a97ef8223344cb..5251565e344b48f1931f500f52494eadd4e51a04 100644 (file)
@@ -434,4 +434,5 @@ EXPORT(sys_call_table)
        PTR     sys_seccomp
        PTR     sys_getrandom
        PTR     sys_memfd_create
+       PTR     sys_bpf                         /* 5315 */
        .size   sys_call_table,.-sys_call_table
index ca6cbbe9805bf8cdc28657468e93390c1ff117eb..77e74398b828770fa8814ed831c2efd6cb412b40 100644 (file)
@@ -427,4 +427,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_seccomp
        PTR     sys_getrandom
        PTR     sys_memfd_create
+       PTR     sys_bpf
        .size   sysn32_call_table,.-sysn32_call_table
index 9e10d11fbb84799840f48ca08143bf44aeddc644..6f8db9f728e8d7d3f22c0518434ebb844e544953 100644 (file)
@@ -564,4 +564,5 @@ EXPORT(sys32_call_table)
        PTR     sys_seccomp
        PTR     sys_getrandom
        PTR     sys_memfd_create
+       PTR     sys_bpf                         /* 4355 */
        .size   sys32_call_table,.-sys32_call_table
index b3b8f0d9d4a77b9ee4d2f02e0a2761a281ee87b1..f3b635f86c39c085ac67126929d4a7cec89e9702 100644 (file)
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
  * NOTE: historically plat_mem_setup did the entire platform initialization.
  *      This was rather impractical because it meant plat_mem_setup had to
  * get away without any kind of memory allocator.  To keep old code from
- * breaking plat_setup was just renamed to plat_setup and a second platform
+ * breaking plat_setup was just renamed to plat_mem_setup and a second platform
  * initialization hook for anything else was introduced.
  */
 
@@ -493,7 +493,7 @@ static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
 {
-       unsigned long start, size;
+       phys_t start, size;
 
        /*
         * If a user specifies memory size, we
@@ -683,7 +683,8 @@ static void __init arch_mem_init(char **cmdline_p)
        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
        /* Tell bootmem about cma reserved memblock section */
        for_each_memblock(reserved, reg)
-               reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+               if (reg->size != 0)
+                       reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 }
 
 static void __init resource_init(void)
index 1d57605e4615288a604403de1a7071d7112b20c0..16f1e4f2bf3c3c08896161106529b4a0c551dd9e 100644 (file)
@@ -658,13 +658,13 @@ static int signal_setup(void)
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
-               save_fp_context = copy_fp_from_sigcontext;
-               restore_fp_context = copy_fp_to_sigcontext;
+               save_fp_context = copy_fp_to_sigcontext;
+               restore_fp_context = copy_fp_from_sigcontext;
        }
 #endif /* CONFIG_SMP */
 #else
-       save_fp_context = copy_fp_from_sigcontext;;
-       restore_fp_context = copy_fp_to_sigcontext;
+       save_fp_context = copy_fp_to_sigcontext;
+       restore_fp_context = copy_fp_from_sigcontext;
 #endif
 
        return 0;
index c17ef80cf65ab0f67946b515c24dab351f684acc..5d3238af9b5cc551ecb0ca9670b242b62f181a73 100644 (file)
        STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
 .Ldone\@:
        jr      ra
+        nop
        .if __memcpy == 1
        END(memcpy)
        .set __memcpy, 0
index 91615c2ef0cf969baeff215ca3d8a627e3851d2f..1ef365ab3cd3bfd93cafe61d6cef6fa919644d22 100644 (file)
@@ -34,7 +34,7 @@ static void dump_tlb(int first, int last)
                entrylo0 = read_c0_entrylo0();
 
                /* Unused entries have a virtual address of KSEG0.  */
-               if ((entryhi & 0xffffe000) != 0x80000000
+               if ((entryhi & 0xfffff000) != 0x80000000
                    && (entryhi & 0xfc0) == asid) {
                        /*
                         * Only print entries in use
@@ -43,7 +43,7 @@ static void dump_tlb(int first, int last)
 
                        printk("va=%08lx asid=%08lx"
                               "  [pa=%06lx n=%d d=%d v=%d g=%d]",
-                              (entryhi & 0xffffe000),
+                              (entryhi & 0xfffff000),
                               entryhi & 0xfc0,
                               entrylo0 & PAGE_MASK,
                               (entrylo0 & (1 << 11)) ? 1 : 0,
index f3af6995e2a6e9546175add0d38d0b555ecd1b61..7d12c0dded3ded2009f85ffd7ed7d6e52f645c0c 100644 (file)
@@ -40,9 +40,11 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
 .else
        EX(lbe, t0, (v0), .Lfault\@)
 .endif
-       PTR_ADDIU       v0, 1
+       .set            noreorder
        bnez            t0, 1b
-1:     PTR_SUBU        v0, a0
+1:      PTR_ADDIU      v0, 1
+       .set            reorder
+       PTR_SUBU        v0, a0
        jr              ra
        END(__strnlen_\func\()_asm)
 
index 0bb9cc9dc621f705dd77b139b0985c01213f9c8e..d87e03330b29ae0dd5e27e9e84536dcc96af6f5c 100644 (file)
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
 # Serial port support
 #
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_SERIAL_8250) += serial.o
+loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
+obj-y += $(loongson-serial-m) $(loongson-serial-y)
 obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
 obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
 
index 37ed184398c6bbf8d017f130c73317ed2e548518..42323bcc5d28baf7279aacc50d15a4107c2c71b7 100644 (file)
@@ -33,6 +33,7 @@
 
 static struct node_data prealloc__node_data[MAX_NUMNODES];
 unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
 struct node_data *__node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(__node_data);
 
index 51a0fde4bec14f07caa78b2f1efec395109bb7bc..cac529a405b881aaf6a0d46cd749a0789bb2c414 100644 (file)
@@ -584,11 +584,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                if (insn.i_format.rs == bc_op) {
                        preempt_disable();
                        if (is_fpu_owner())
-                               asm volatile(
-                                       ".set push\n"
-                                       "\t.set mips1\n"
-                                       "\tcfc1\t%0,$31\n"
-                                       "\t.set pop" : "=r" (fcr31));
+                               fcr31 = read_32bit_cp1_register(CP1_STATUS);
                        else
                                fcr31 = current->thread.fpu.fcr31;
                        preempt_enable();
index fa6ebd4bc9e9ae9b28a058650d0f9f87011e4c2e..c3917e251f59393a5138b3d4c557886e4fa45286 100644 (file)
@@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
        local_irq_save(flags);
 
+       htw_stop();
        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
@@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
+       htw_start();
        flush_itlb_vm(vma);
        local_irq_restore(flags);
 }
@@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
+       htw_stop();
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
@@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
+       htw_start();
 out:
        local_irq_restore(flags);
        return ret;
index b5f228e7eae6144565e34c74bf6f86e5d973a760..e3328a96e80909b758a8d619b6a0f8398399d2da 100644 (file)
@@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
        uasm_l_smp_pgtable_change(l, *p);
 #endif
        iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
-       if (!m4kc_tlbp_war())
+       if (!m4kc_tlbp_war()) {
                build_tlb_probe_entry(p);
+               if (cpu_has_htw) {
+                       /* race condition happens, leaving */
+                       uasm_i_ehb(p);
+                       uasm_i_mfc0(p, wr.r3, C0_INDEX);
+                       uasm_il_bltz(p, r, wr.r3, label_leave);
+                       uasm_i_nop(p);
+               }
+       }
        return wr;
 }
 
index 20102a6d41410fbba6854edab6ee1199d50cc7fa..c427c57781865e13d52dc75ea2bb2f8db5db9ad7 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 
@@ -76,8 +76,4 @@ static int __init led_init(void)
        return platform_device_register(&fled_device);
 }
 
-module_init(led_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LED probe driver for SEAD-3");
+device_initcall(led_init);
index be358a8050c57c14377c1bcc44cd45b332b622b1..6b43af0a34d9dd39c4afd08526342bbdd514b068 100644 (file)
@@ -1,6 +1,10 @@
 obj-y                          += setup.o nlm_hal.o cop2-ex.o dt.o
 obj-$(CONFIG_SMP)              += wakeup.o
-obj-$(CONFIG_USB)              += usb-init.o
-obj-$(CONFIG_USB)              += usb-init-xlp2.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init-xlp2.o
+ifdef CONFIG_USB
+obj-y                          += usb-init.o
+obj-y                          += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y                          += ahci-init.o
+obj-y                          += ahci-init-xlp2.o
+endif
index 6854ed5097d2e61bb3f7f3e2937612a5d1058b07..83a1dfd8f0e3c34d02cbe24448f5c5407852f942 100644 (file)
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
                                /* This marks the end of the previous function,
                                   which means we overran. */
                                break;
-                       stack_size = (unsigned) stack_adjustment;
+                       stack_size = (unsigned long) stack_adjustment;
                } else if (is_ra_save_ins(&ip)) {
                        int ra_slot = ip.i_format.simmediate;
                        if (ra_slot < 0)
index fa374fe3746ba344f30e2b14b8ff6c3669bc5c88..f7ac3edda1b211a512ae0afe6e633a421cda7664 100644 (file)
@@ -443,10 +443,8 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
        msg.data = 0xc00 | msixvec;
 
        ret = irq_set_msi_desc(xirq, desc);
-       if (ret < 0) {
-               destroy_irq(xirq);
+       if (ret < 0)
                return ret;
-       }
 
        write_msi_msg(xirq, &msg);
        return 0;
index a95c00f5fb9697ed90fa79d44ec887b498f680fc..a304bcc37e4fba137a5a1ee8bc792a50d7258e01 100644 (file)
@@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
 }
 
 unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+EXPORT_SYMBOL(__node_distances);
 
 static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
 {
index 4006964d8e12646761d954b9f73ff0b503e736b6..a5cb070b54bf4ade7cce44e50f763a5223991e83 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/errno.h>
 #include <asm-generic/uaccess-unaligned.h>
 
+#include <linux/bug.h>
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
  * that put_user is the same as __put_user, etc.
  */
 
-extern int __get_kernel_bad(void);
-extern int __get_user_bad(void);
-extern int __put_kernel_bad(void);
-extern int __put_user_bad(void);
-
 static inline long access_ok(int type, const void __user * addr,
                unsigned long size)
 {
@@ -43,8 +40,8 @@ static inline long access_ok(int type, const void __user * addr,
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_KERNEL(ptr)                __get_kernel_bad();
-#define LDD_USER(ptr)          __get_user_bad();
+#define LDD_KERNEL(ptr)                BUILD_BUG()
+#define LDD_USER(ptr)          BUILD_BUG()
 #define STD_KERNEL(x, ptr)     __put_kernel_asm64(x,ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x,ptr)
 #define ASM_WORD_INSN          ".word\t"
@@ -94,7 +91,7 @@ struct exception_data {
            case 2: __get_kernel_asm("ldh",ptr); break; \
            case 4: __get_kernel_asm("ldw",ptr); break; \
            case 8: LDD_KERNEL(ptr); break;             \
-           default: __get_kernel_bad(); break;         \
+           default: BUILD_BUG(); break;                \
            }                                           \
        }                                               \
        else {                                          \
@@ -103,7 +100,7 @@ struct exception_data {
            case 2: __get_user_asm("ldh",ptr); break;   \
            case 4: __get_user_asm("ldw",ptr); break;   \
            case 8: LDD_USER(ptr);  break;              \
-           default: __get_user_bad(); break;           \
+           default: BUILD_BUG(); break;                \
            }                                           \
        }                                               \
                                                        \
@@ -136,7 +133,7 @@ struct exception_data {
            case 2: __put_kernel_asm("sth",__x,ptr); break;     \
            case 4: __put_kernel_asm("stw",__x,ptr); break;     \
            case 8: STD_KERNEL(__x,ptr); break;                 \
-           default: __put_kernel_bad(); break;                 \
+           default: BUILD_BUG(); break;                        \
            }                                                   \
        }                                                       \
        else {                                                  \
@@ -145,7 +142,7 @@ struct exception_data {
            case 2: __put_user_asm("sth",__x,ptr); break;       \
            case 4: __put_user_asm("stw",__x,ptr); break;       \
            case 8: STD_USER(__x,ptr); break;                   \
-           default: __put_user_bad(); break;                   \
+           default: BUILD_BUG(); break;                        \
            }                                                   \
        }                                                       \
                                                                \
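Replacing the never-defined extern __*_bad() helpers with BUILD_BUG() moves the failure from an obscure link error to a clear compile-time one. A hypothetical illustration of the same technique outside this header:

	#include <linux/bug.h>

	/* Any size the switch does not handle now breaks the build
	 * instead of failing at link time.  (Hypothetical macro;
	 * size must be a compile-time constant.) */
	#define copy_fixed(dst, src, size)			\
	do {							\
		switch (size) {					\
		case 1: case 2: case 4: case 8:			\
			__builtin_memcpy(dst, src, size);	\
			break;					\
		default:					\
			BUILD_BUG();	/* unsupported size */	\
		}						\
	} while (0)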
index 75196b415d3f8f313ec94025847427bf15a28bd6..e0a23c7bdd432adc59f4f4ebefcc4d51f91b05f1 100644 (file)
@@ -1,13 +1,7 @@
 #ifndef __ASM_PARISC_BITSPERLONG_H
 #define __ASM_PARISC_BITSPERLONG_H
 
-/*
- * using CONFIG_* outside of __KERNEL__ is wrong,
- * __LP64__ was also removed from headers, so what
- * is the right approach on parisc?
- *     -arnd
- */
-#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
+#if defined(__LP64__)
 #define __BITS_PER_LONG 64
 #define SHIFT_PER_LONG 6
 #else
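CONFIG_* symbols do not exist once a header is exported to userspace, so the uapi headers in this series key off compiler-provided macros instead. The pattern, in a minimal sketch (example_ds is a hypothetical structure):

	/* uapi-safe: __LP64__ comes from the compiler; CONFIG_64BIT
	 * is only defined during the kernel build itself. */
	#include <asm/bitsperlong.h>
	#include <asm/posix_types.h>

	struct example_ds {
	#if __BITS_PER_LONG != 64
		unsigned int __pad1;	/* explicit padding on 32-bit ABIs */
	#endif
		__kernel_time_t stamp;
	};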
index fe88f2649418e63c77022ed74ff43b12c1bccda2..342138983914ce8e0f5641622fb4a46bf9034155 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _PARISC_MSGBUF_H
 #define _PARISC_MSGBUF_H
 
+#include <asm/bitsperlong.h>
+
 /* 
  * The msqid64_ds structure for parisc architecture, copied from sparc.
  * Note extra padding because this structure is passed back and forth
 
 struct msqid64_ds {
        struct ipc64_perm msg_perm;
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int   __pad1;
 #endif
        __kernel_time_t msg_stime;      /* last msgsnd time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int   __pad2;
 #endif
        __kernel_time_t msg_rtime;      /* last msgrcv time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int   __pad3;
 #endif
        __kernel_time_t msg_ctime;      /* last change time */
index 1e59ffd3bd1ee6663acc20e663e4dafe282f4b2b..f01d89e30d735098d682b4cca2a71e79e689d4e8 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _PARISC_SEMBUF_H
 #define _PARISC_SEMBUF_H
 
+#include <asm/bitsperlong.h>
+
 /* 
  * The semid64_ds structure for parisc architecture.
  * Note extra padding because this structure is passed back and forth
 
 struct semid64_ds {
        struct ipc64_perm sem_perm;             /* permissions .. see ipc.h */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int    __pad1;
 #endif
        __kernel_time_t sem_otime;              /* last semop time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int    __pad2;
 #endif
        __kernel_time_t sem_ctime;              /* last change time */
index 0a3eada1863b71e1e859e5bf1536da089a65ffd1..8496c38560c6bd63c1a780acef9f7bcd1d00b475 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _PARISC_SHMBUF_H
 #define _PARISC_SHMBUF_H
 
+#include <asm/bitsperlong.h>
+
 /* 
  * The shmid64_ds structure for parisc architecture.
  * Note extra padding because this structure is passed back and forth
 
 struct shmid64_ds {
        struct ipc64_perm       shm_perm;       /* operation perms */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int            __pad1;
 #endif
        __kernel_time_t         shm_atime;      /* last attach time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int            __pad2;
 #endif
        __kernel_time_t         shm_dtime;      /* last detach time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int            __pad3;
 #endif
        __kernel_time_t         shm_ctime;      /* last change time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
        unsigned int            __pad4;
 #endif
        size_t                  shm_segsz;      /* size of segment (bytes) */
@@ -36,23 +38,16 @@ struct shmid64_ds {
        unsigned int            __unused2;
 };
 
-#ifdef CONFIG_64BIT
-/* The 'unsigned int' (formerly 'unsigned long') data types below will
- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
- * a wide kernel, but if some of these values are meant to contain pointers
- * they may need to be 'long long' instead. -PB XXX FIXME
- */
-#endif
 struct shminfo64 {
-       unsigned int    shmmax;
-       unsigned int    shmmin;
-       unsigned int    shmmni;
-       unsigned int    shmseg;
-       unsigned int    shmall;
-       unsigned int    __unused1;
-       unsigned int    __unused2;
-       unsigned int    __unused3;
-       unsigned int    __unused4;
+       unsigned long   shmmax;
+       unsigned long   shmmin;
+       unsigned long   shmmni;
+       unsigned long   shmseg;
+       unsigned long   shmall;
+       unsigned long   __unused1;
+       unsigned long   __unused2;
+       unsigned long   __unused3;
+       unsigned long   __unused4;
 };
 
 #endif /* _PARISC_SHMBUF_H */
index 10df7079f4cdf08dec0e26d871e4270d157a50ed..e26043b73f5d8e32280b4bd1cfe087f4c93aa3b9 100644 (file)
@@ -85,7 +85,7 @@
 struct siginfo;
 
 /* Type of a signal handler.  */
-#ifdef CONFIG_64BIT
+#if defined(__LP64__)
 /* function pointers on 64-bit parisc are pointers to little structs and the
  * compiler doesn't support code which changes or tests the address of
  * the function in the little struct.  This is really ugly -PB
index 8667f18be2385d7423da2efac3803671b500740e..5f5c0373de63cb6dc898e9b3a273427a96fbe607 100644 (file)
 #define __NR_seccomp           (__NR_Linux + 338)
 #define __NR_getrandom         (__NR_Linux + 339)
 #define __NR_memfd_create      (__NR_Linux + 340)
+#define __NR_bpf               (__NR_Linux + 341)
 
-#define __NR_Linux_syscalls    (__NR_memfd_create + 1)
+#define __NR_Linux_syscalls    (__NR_bpf + 1)
 
 
 #define __IGNORE_select                /* newselect */
index b563d9c8268b153429a06cfc85924c236d00c688..fe4f0b89bf8fa0ed21d3f5a568dd946a0dd81be0 100644 (file)
        ENTRY_COMP(msgsnd)
        ENTRY_COMP(msgrcv)
        ENTRY_SAME(msgget)              /* 190 */
-       ENTRY_SAME(msgctl)
-       ENTRY_SAME(shmat)
+       ENTRY_COMP(msgctl)
+       ENTRY_COMP(shmat)
        ENTRY_SAME(shmdt)
        ENTRY_SAME(shmget)
-       ENTRY_SAME(shmctl)              /* 195 */
+       ENTRY_COMP(shmctl)              /* 195 */
        ENTRY_SAME(ni_syscall)          /* streams1 */
        ENTRY_SAME(ni_syscall)          /* streams2 */
        ENTRY_SAME(lstat64)
        ENTRY_SAME(epoll_ctl)           /* 225 */
        ENTRY_SAME(epoll_wait)
        ENTRY_SAME(remap_file_pages)
-       ENTRY_SAME(semtimedop)
+       ENTRY_COMP(semtimedop)
        ENTRY_COMP(mq_open)
        ENTRY_SAME(mq_unlink)           /* 230 */
        ENTRY_COMP(mq_timedsend)
        ENTRY_SAME(seccomp)
        ENTRY_SAME(getrandom)
        ENTRY_SAME(memfd_create)        /* 340 */
+       ENTRY_SAME(bpf)
 
        /* Nothing yet */
 
index a6774560afe3753dd0f4636124c1021305b7a391..493e72f64b35feac4a03fe7912aa7c04067e6fe6 100644 (file)
 #define CPU_UNKNOWN            (~((u32)0))
 
 /* Utility macros */
-#define SKIP_TO_NEXT_CPU(reg_entry)                    \
-({                                                     \
-       while (reg_entry->reg_id != REG_ID("CPUEND"))   \
-               reg_entry++;                            \
-       reg_entry++;                                    \
+#define SKIP_TO_NEXT_CPU(reg_entry)                                    \
+({                                                                     \
+       while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND"))      \
+               reg_entry++;                                            \
+       reg_entry++;                                                    \
 })
 
 /* Kernel Dump section info */
 struct fadump_section {
-       u32     request_flag;
-       u16     source_data_type;
-       u16     error_flags;
-       u64     source_address;
-       u64     source_len;
-       u64     bytes_dumped;
-       u64     destination_address;
+       __be32  request_flag;
+       __be16  source_data_type;
+       __be16  error_flags;
+       __be64  source_address;
+       __be64  source_len;
+       __be64  bytes_dumped;
+       __be64  destination_address;
 };
 
 /* ibm,configure-kernel-dump header. */
 struct fadump_section_header {
-       u32     dump_format_version;
-       u16     dump_num_sections;
-       u16     dump_status_flag;
-       u32     offset_first_dump_section;
+       __be32  dump_format_version;
+       __be16  dump_num_sections;
+       __be16  dump_status_flag;
+       __be32  offset_first_dump_section;
 
        /* Fields for disk dump option. */
-       u32     dd_block_size;
-       u64     dd_block_offset;
-       u64     dd_num_blocks;
-       u32     dd_offset_disk_path;
+       __be32  dd_block_size;
+       __be64  dd_block_offset;
+       __be64  dd_num_blocks;
+       __be32  dd_offset_disk_path;
 
        /* Maximum time allowed to prevent an automatic dump-reboot. */
-       u32     max_time_auto;
+       __be32  max_time_auto;
 };
 
 /*
@@ -174,15 +174,15 @@ static inline u64 str_to_u64(const char *str)
 
 /* Register save area header. */
 struct fadump_reg_save_area_header {
-       u64             magic_number;
-       u32             version;
-       u32             num_cpu_offset;
+       __be64          magic_number;
+       __be32          version;
+       __be32          num_cpu_offset;
 };
 
 /* Register entry. */
 struct fadump_reg_entry {
-       u64             reg_id;
-       u64             reg_value;
+       __be64          reg_id;
+       __be64          reg_value;
 };
 
 /* fadump crash info structure */
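Retyping the firmware-visible fields as __be16/__be32/__be64 lets sparse catch any access that skips a byteswap: every load and store must now go through the endian accessors, as the fadump.c hunks below do. The convention, in a minimal sketch (fw_field and its helpers are hypothetical names):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct fw_field {
		__be64 reg_id;			/* big-endian in memory */
	};

	static inline u64 fw_get_id(const struct fw_field *f)
	{
		return be64_to_cpu(f->reg_id);	/* explicit conversion */
	}

	static inline void fw_set_id(struct fw_field *f, u64 id)
	{
		f->reg_id = cpu_to_be64(id);	/* sparse warns if omitted */
	}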
index 4ca90a39d6d01af63da46c73d19b816b0979e538..725247beebecda3493e9e477f7fd4ec29911b558 100644 (file)
@@ -159,8 +159,6 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
-       bool    force_32bit_msi;
-
        struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
        struct eeh_dev *edev;           /* eeh device */
index f19b1e5cb06096e2bd9b68b8cd620669c8943cac..1ceecdda810b04722b88329d52b866c3c540ad0e 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
                return -ENODEV;
 
        state = eeh_ops->get_state(edev->pe, NULL);
-       return sprintf(buf, "%0x08x %0x08x\n",
+       return sprintf(buf, "0x%08x 0x%08x\n",
                       state, edev->pe->state);
 }
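The old format string "%0x08x" parses as "%0x" (a hex conversion with a zero flag but no field width) followed by the literal text "08x", so the file printed values like "a08x" instead of "0x0000000a"; "0x%08x" gives the intended 0x prefix and 8-digit zero-padded field. Illustration with a hypothetical state value:

	/* state == 0xa */
	sprintf(buf, "%0x08x\n", state);	/* -> "a08x"       (broken) */
	sprintf(buf, "0x%08x\n", state);	/* -> "0x0000000a" (fixed)  */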
 
index 5bbd1bc8c3b0a3f4265e263ce315311d8562f689..0905c8da90f1bb6e2aedb5e4cbfd7a7351df44a7 100644 (file)
@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
 3:
 #endif
        bl      save_nvgprs
+       /*
+        * Use a non volatile GPR to save and restore our thread_info flags
+        * across the call to restore_interrupts.
+        */
+       mr      r30,r4
        bl      restore_interrupts
+       mr      r4,r30
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_notify_resume
        b       ret_from_except
index 742694c1d85238fe380e4ceecd7443506bb8d0b7..26d091a1a54cf555627fd95bb135cf98eb3e52b8 100644 (file)
@@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
        const __be32 *sections;
        int i, num_sections;
        int size;
-       const int *token;
+       const __be32 *token;
 
        if (depth != 1 || strcmp(uname, "rtas") != 0)
                return 0;
@@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
                return 1;
 
        fw_dump.fadump_supported = 1;
-       fw_dump.ibm_configure_kernel_dump = *token;
+       fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);
 
        /*
         * The 'ibm,kernel-dump' rtas node is present only if there is
@@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
        memset(fdm, 0, sizeof(struct fadump_mem_struct));
        addr = addr & PAGE_MASK;
 
-       fdm->header.dump_format_version = 0x00000001;
-       fdm->header.dump_num_sections = 3;
+       fdm->header.dump_format_version = cpu_to_be32(0x00000001);
+       fdm->header.dump_num_sections = cpu_to_be16(3);
        fdm->header.dump_status_flag = 0;
        fdm->header.offset_first_dump_section =
-               (u32)offsetof(struct fadump_mem_struct, cpu_state_data);
+               cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));
 
        /*
         * Fields for disk dump option.
@@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
 
        /* Kernel dump sections */
        /* cpu state data section. */
-       fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
-       fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
+       fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+       fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
        fdm->cpu_state_data.source_address = 0;
-       fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
-       fdm->cpu_state_data.destination_address = addr;
+       fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
+       fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
        addr += fw_dump.cpu_state_data_size;
 
        /* hpte region section */
-       fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
-       fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
+       fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+       fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
        fdm->hpte_region.source_address = 0;
-       fdm->hpte_region.source_len = fw_dump.hpte_region_size;
-       fdm->hpte_region.destination_address = addr;
+       fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
+       fdm->hpte_region.destination_address = cpu_to_be64(addr);
        addr += fw_dump.hpte_region_size;
 
        /* RMA region section */
-       fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
-       fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
-       fdm->rmr_region.source_address = RMA_START;
-       fdm->rmr_region.source_len = fw_dump.boot_memory_size;
-       fdm->rmr_region.destination_address = addr;
+       fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+       fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
+       fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
+       fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
+       fdm->rmr_region.destination_address = cpu_to_be64(addr);
        addr += fw_dump.boot_memory_size;
 
        return addr;
@@ -272,7 +272,7 @@ int __init fadump_reserve_mem(void)
         * first kernel.
         */
        if (fdm_active)
-               fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
+               fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
        else
                fw_dump.boot_memory_size = fadump_calculate_reserve_size();
 
@@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void)
                                (unsigned long)(base >> 20));
 
                fw_dump.fadumphdr_addr =
-                               fdm_active->rmr_region.destination_address +
-                               fdm_active->rmr_region.source_len;
+                               be64_to_cpu(fdm_active->rmr_region.destination_address) +
+                               be64_to_cpu(fdm_active->rmr_region.source_len);
                pr_debug("fadumphdr_addr = %p\n",
                                (void *) fw_dump.fadumphdr_addr);
        } else {
@@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
 {
        memset(regs, 0, sizeof(struct pt_regs));
 
-       while (reg_entry->reg_id != REG_ID("CPUEND")) {
-               fadump_set_regval(regs, reg_entry->reg_id,
-                                       reg_entry->reg_value);
+       while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
+               fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
+                                       be64_to_cpu(reg_entry->reg_value));
                reg_entry++;
        }
        reg_entry++;
@@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
        if (!fdm->cpu_state_data.bytes_dumped)
                return -EINVAL;
 
-       addr = fdm->cpu_state_data.destination_address;
+       addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
        vaddr = __va(addr);
 
        reg_header = vaddr;
-       if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
+       if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
                printk(KERN_ERR "Unable to read register save area.\n");
                return -ENOENT;
        }
        pr_debug("--------CPU State Data------------\n");
-       pr_debug("Magic Number: %llx\n", reg_header->magic_number);
-       pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);
+       pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
+       pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
 
-       vaddr += reg_header->num_cpu_offset;
-       num_cpus = *((u32 *)(vaddr));
+       vaddr += be32_to_cpu(reg_header->num_cpu_offset);
+       num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
        pr_debug("NumCpus     : %u\n", num_cpus);
        vaddr += sizeof(u32);
        reg_entry = (struct fadump_reg_entry *)vaddr;
@@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
                fdh = __va(fw_dump.fadumphdr_addr);
 
        for (i = 0; i < num_cpus; i++) {
-               if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
+               if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
                        printk(KERN_ERR "Unable to read CPU state data\n");
                        rc = -ENOENT;
                        goto error_out;
                }
                /* Lower 4 bytes of reg_value contains logical cpu id */
-               cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
+               cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
                if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
                        SKIP_TO_NEXT_CPU(reg_entry);
                        continue;
@@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
                return -EINVAL;
 
        /* Check if the dump data is valid. */
-       if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
+       if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
                        (fdm_active->cpu_state_data.error_flags != 0) ||
                        (fdm_active->rmr_region.error_flags != 0)) {
                printk(KERN_ERR "Dump taken by platform is not valid\n");
@@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void)
 static inline unsigned long fadump_relocate(unsigned long paddr)
 {
        if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
-               return fdm.rmr_region.destination_address + paddr;
+               return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
        else
                return paddr;
 }
@@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp)
                         * to the specified destination_address. Hence set
                         * the correct offset.
                         */
-                       phdr->p_offset = fdm.rmr_region.destination_address;
+                       phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
                }
 
                phdr->p_paddr = mbase;
@@ -951,7 +951,7 @@ static void register_fadump(void)
 
        fadump_setup_crash_memory_ranges();
 
-       addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
+       addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
        /* Initialize fadump crash info header. */
        addr = init_fadump_header(addr);
        vaddr = __va(addr);
@@ -1023,7 +1023,7 @@ void fadump_cleanup(void)
        /* Invalidate the registration only if dump is active. */
        if (fw_dump.dump_active) {
                init_fadump_mem_struct(&fdm,
-                       fdm_active->cpu_state_data.destination_address);
+                       be64_to_cpu(fdm_active->cpu_state_data.destination_address));
                fadump_invalidate_dump(&fdm);
        }
 }
@@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void)
                return;
        }
 
-       destination_address = fdm_active->cpu_state_data.destination_address;
+       destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
        fadump_cleanup();
        mutex_unlock(&fadump_mutex);
 
@@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private)
        seq_printf(m,
                        "CPU : [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
-                       fdm_ptr->cpu_state_data.destination_address,
-                       fdm_ptr->cpu_state_data.destination_address +
-                       fdm_ptr->cpu_state_data.source_len - 1,
-                       fdm_ptr->cpu_state_data.source_len,
-                       fdm_ptr->cpu_state_data.bytes_dumped);
+                       be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
+                       be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
+                       be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
+                       be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
+                       be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
        seq_printf(m,
                        "HPTE: [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
-                       fdm_ptr->hpte_region.destination_address,
-                       fdm_ptr->hpte_region.destination_address +
-                       fdm_ptr->hpte_region.source_len - 1,
-                       fdm_ptr->hpte_region.source_len,
-                       fdm_ptr->hpte_region.bytes_dumped);
+                       be64_to_cpu(fdm_ptr->hpte_region.destination_address),
+                       be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
+                       be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
+                       be64_to_cpu(fdm_ptr->hpte_region.source_len),
+                       be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
        seq_printf(m,
                        "DUMP: [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
-                       fdm_ptr->rmr_region.destination_address,
-                       fdm_ptr->rmr_region.destination_address +
-                       fdm_ptr->rmr_region.source_len - 1,
-                       fdm_ptr->rmr_region.source_len,
-                       fdm_ptr->rmr_region.bytes_dumped);
+                       be64_to_cpu(fdm_ptr->rmr_region.destination_address),
+                       be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
+                       be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
+                       be64_to_cpu(fdm_ptr->rmr_region.source_len),
+                       be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
 
        if (!fdm_active ||
                (fw_dump.reserve_dump_area_start ==
-               fdm_ptr->cpu_state_data.destination_address))
+               be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
                goto out;
 
        /* Dump is active. Show reserved memory region. */
@@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private)
                        "    : [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
                        (unsigned long long)fw_dump.reserve_dump_area_start,
-                       fdm_ptr->cpu_state_data.destination_address - 1,
-                       fdm_ptr->cpu_state_data.destination_address -
+                       be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
+                       be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                        fw_dump.reserve_dump_area_start,
-                       fdm_ptr->cpu_state_data.destination_address -
+                       be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                        fw_dump.reserve_dump_area_start);
 out:
        if (fdm_active)
index 155013da27e05cb801ba961b102d41f3edbfb48d..b15194e2c5fc55ca934dba97fe4863b2c273baa5 100644 (file)
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
-
-static void quirk_radeon_32bit_msi(struct pci_dev *dev)
-{
-       struct pci_dn *pdn = pci_get_pdn(dev);
-
-       if (pdn)
-               pdn->force_32bit_msi = true;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
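This powerpc-private quirk goes away because the flag moved from struct pci_dn into struct pci_dev as no_64bit_msi (see the checks later in this merge), so a generic PCI quirk can set it for every architecture. A hedged sketch of what the generic replacement looks like; the exact function name in drivers/pci/quirks.c is an assumption:

	static void quirk_msi_no_64bit(struct pci_dev *dev)
	{
		dev->no_64bit_msi = 1;	/* force 32-bit MSI addressing */
	}
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_msi_no_64bit);
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_msi_no_64bit);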
index 23eb9a9441bdad612481a7d1b2fcf12dd17a7ed3..c62be60c727485cce5108fcf4770b28f42129eee 100644 (file)
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_SPRG_VDSO_READ
-       cmpdi   cr0,r3,0
-       cmpdi   cr1,r4,0
+       cmpwi   cr0,r3,0
+       cmpwi   cr1,r4,0
        clrlwi  r6,r5,16
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
index cad68ff8eca5322990cfa56ee46003ae530e1d39..415a51b028b952c14b4f1ad3c88dd79b73657142 100644 (file)
@@ -103,7 +103,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM;
 /*
  * Check for command-line options that affect what MMU_init will do.
  */
-void MMU_setup(void)
+void __init MMU_setup(void)
 {
        /* Check for nobats option (used in mapin_ram). */
        if (strstr(boot_command_line, "nobats")) {
index 5e1ed1575aabe23c245edcdff06433cfb0a62327..b322bfb51343f65fdfe76d265cdcb76928011d21 100644 (file)
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
        };
 
        /* Print things out */
-       if (hmi_evt->version != OpalHMIEvt_V1) {
+       if (hmi_evt->version < OpalHMIEvt_V1) {
                pr_err("HMI Interrupt, Unknown event version %d !\n",
                        hmi_evt->version);
                return;
index ad4b31df779a389ceb83b1c62d77a850e9c1b749..e4169d68cb3289a98230a7547b0ef24e3fb5c260 100644 (file)
@@ -216,14 +216,54 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
                                   &data, len);
                if (rc)
                        return -ENXIO;
+
+               /*
+                * Now there is some trickery with the data returned by OPAL
+                * as it's the desired data right justified in a 32-bit BE
+                * word.
+                *
+                * This is a very bad interface and I'm to blame for it :-(
+                *
+                * So we can't just apply a 32-bit swap to what comes from OPAL,
+                * because user space expects the *bytes* to be in their proper
+                * respective positions (ie, LPC position).
+                *
+                * So what we really want to do here is to shift data right
+                * appropriately on a LE kernel.
+                *
+                * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that
+                * order, we have in memory written to by OPAL at the "data"
+                * pointer:
+                *
+                *               Bytes:      OPAL "data"   LE "data"
+                *   32-bit:   B0 B1 B2 B3   B0B1B2B3      B3B2B1B0
+                *   16-bit:   B0 B1         0000B0B1      B1B00000
+                *    8-bit:   B0            000000B0      B0000000
+                *
+                * So a BE kernel will have the leftmost of the above in the MSB
+                * and rightmost in the LSB and can just then "cast" the u32 "data"
+                * down to the appropriate quantity and write it.
+                *
+                * However, an LE kernel can't. It doesn't need to swap because a
+                * load from data followed by a store to user is going to preserve
+                * the byte ordering, which is the wire byte order and what the
+                * user wants, but in order to "crop" to the right size, we need to
+                * shift right first.
+                */
                switch(len) {
                case 4:
                        rc = __put_user((u32)data, (u32 __user *)ubuf);
                        break;
                case 2:
+#ifdef __LITTLE_ENDIAN__
+                       data >>= 16;
+#endif
                        rc = __put_user((u16)data, (u16 __user *)ubuf);
                        break;
                default:
+#ifdef __LITTLE_ENDIAN__
+                       data >>= 24;
+#endif
                        rc = __put_user((u8)data, (u8 __user *)ubuf);
                        break;
                }
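To make the byte table in the comment concrete, here is a worked example for the 8-bit case on a little-endian kernel, with a hypothetical byte value:

	/* OPAL returns byte B0 = 0xab right-justified in a 32-bit BE
	 * word, so memory holds 00 00 00 ab.  A little-endian load of
	 * those bytes yields 0xab000000: the byte lands in the top 8
	 * bits, hence the >> 24 before the __put_user() cast. */
	u32 data = 0xab000000;		/* LE register view of the word */
	u8  out  = (u8)(data >> 24);	/* out == 0xab, as user space expects */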
@@ -263,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
                        else if (todo > 1 && (pos & 1) == 0)
                                len = 2;
                }
+
+               /*
+                * Similarly to the read case, we have some trickery here but
+                * it's different to handle. We need to pass the value to OPAL in
+                * a register whose layout depends on the access size. We want
+                * to reproduce the memory layout of the user, however we aren't
+                * doing a load from user and a store to another memory location
+                * which would achieve that. Here we pass the value to OPAL via
+                * a register which is expected to contain the "BE" interpretation
+                * of the byte sequence. IE: for a 32-bit access, byte 0 should be
+                * in the MSB. So here we *do* need to byteswap on LE.
+                *
+                *           User bytes:    LE "data"  OPAL "data"
+                *  32-bit:  B0 B1 B2 B3    B3B2B1B0   B0B1B2B3
+                *  16-bit:  B0 B1          0000B1B0   0000B0B1
+                *   8-bit:  B0             000000B0   000000B0
+                */
                switch(len) {
                case 4:
                        rc = __get_user(data, (u32 __user *)ubuf);
+                       data = cpu_to_be32(data);
                        break;
                case 2:
                        rc = __get_user(data, (u16 __user *)ubuf);
+                       data = cpu_to_be16(data);
                        break;
                default:
                        rc = __get_user(data, (u8 __user *)ubuf);
index 468a0f23c7f2b5f756c1b553315793c03492c0d6..3ba435ec3dcd584e5f466b78eae18379a69d482b 100644 (file)
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int is_64, struct msi_msg *msg)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
-       struct pci_dn *pdn = pci_get_pdn(dev);
        unsigned int xive_num = hwirq - phb->msi_base;
        __be32 data;
        int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                return -ENXIO;
 
        /* Force 32-bit MSI on some broken devices */
-       if (pdn && pdn->force_32bit_msi)
+       if (dev->no_64bit_msi)
                is_64 = 0;
 
        /* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
        if (is_kdump_kernel()) {
                pr_info("  Issue PHB reset ...\n");
                ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
-               ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
+               ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
        }
 
        /* Configure M64 window */
index b2187d0068b876e6909376c81d390cbf7b8bad00..4b20f2c6b3b24ba950d3ea10014e0b1fffbfc0b6 100644 (file)
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pci_get_pdn(pdev);
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;
 
-       if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+       if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;
 
        list_for_each_entry(entry, &pdev->msi_list, list) {
index 6ad83bd11fe21d0aaa22ad0d9cd6e0d399bb945d..c22bb1b4beb878c50bbe2f3c8d4dba7fd5d142f1 100644 (file)
@@ -382,7 +382,7 @@ static int dlpar_online_cpu(struct device_node *dn)
                        BUG_ON(get_cpu_current_state(cpu)
                                        != CPU_STATE_OFFLINE);
                        cpu_maps_update_done();
-                       rc = cpu_up(cpu);
+                       rc = device_online(get_cpu_device(cpu));
                        if (rc)
                                goto out;
                        cpu_maps_update_begin();
@@ -467,7 +467,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
                        if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
                                set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                                cpu_maps_update_done();
-                               rc = cpu_down(cpu);
+                               rc = device_offline(get_cpu_device(cpu));
                                if (rc)
                                        goto out;
                                cpu_maps_update_begin();
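Using device_online()/device_offline() instead of cpu_up()/cpu_down() routes DLPAR hotplug through the CPU's struct device, so the sysfs "online" attribute stays consistent with the actual CPU state. The shape of the call, as a sketch (real callers may also need lock_device_hotplug()):

	struct device *cpu_dev = get_cpu_device(cpu);

	if (cpu_dev)
		rc = device_online(cpu_dev);	/* was: rc = cpu_up(cpu); */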
index 8c509d5397c6e9a6f54c19ec569612e0005f96e2..f6880d2a40fbeffed49375c1cfcb174ce47ccb86 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/trace.h>
 #include <asm/firmware.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/fadump.h>
 
 #include "pseries.h"
 
@@ -247,8 +248,17 @@ static void pSeries_lpar_hptab_clear(void)
        }
 
 #ifdef __LITTLE_ENDIAN__
-       /* Reset exceptions to big endian */
-       if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+       /*
+        * Reset exceptions to big endian.
+        *
+        * FIXME this is a hack for kexec, we need to reset the exception
+        * endian before starting the new kernel and this is a convenient place
+        * to do it.
+        *
+        * This is also called on boot when a fadump happens. In that case we
+        * must not change the exception endian mode.
+        */
+       if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
                long rc;
 
                rc = pseries_big_endian_exceptions();
index 8ab5add4ac824f43c6a6b299b24ed15bf0deafb2..8b909e94fd9a10bbee407c2e1a04df7320e93a71 100644 (file)
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
         */
 again:
        if (type == PCI_CAP_ID_MSI) {
-               if (pdn->force_32bit_msi) {
+               if (pdev->no_64bit_msi) {
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
                        if (rc < 0) {
                                /*
index de40b48b460e83a8aaec5168c8a4a1f025d8dd38..da08ed08815751cec116164e8f6d3e828bda9fd4 100644 (file)
@@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
        cascade_data->virq = virt_msir;
        msi->cascade_array[irq_index] = cascade_data;
 
-       ret = request_irq(virt_msir, fsl_msi_cascade, 0,
+       ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
                          "fsl-msi-cascade", cascade_data);
        if (ret) {
                dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
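IRQF_NO_THREAD matters here because fsl_msi_cascade() demultiplexes a chained interrupt: with the threadirqs boot option it would otherwise be force-threaded, and a cascade handler running in a thread can deadlock against the interrupts it demuxes. The idiom, with hypothetical names:

	/* Demux/cascade handlers must stay in hard-irq context even
	 * under forced interrupt threading. */
	ret = request_irq(cascade_irq, cascade_handler, IRQF_NO_THREAD,
			  "msi-cascade", data);
	if (ret)
		pr_err("cascade irq %d request failed: %d\n", cascade_irq, ret);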
index b988b5addf864a581ff8c36e177379c32ba92518..c8efbb37d6e076ab123a3d5d8066f58edd36acd8 100644 (file)
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
-       args.nargs = 3;
-       args.nret = 1;
+       args.nargs = cpu_to_be32(3);
+       args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
-       args.args[0] = SURVEILLANCE_TOKEN;
+       args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
        args.args[1] = 0;
        args.args[2] = 0;
        enter_rtas(__pa(&args));
index 9d94fdd9f525e9503d7f39f0a71c89cacaf53349..9432d0f202ef20a6bd5ee8300d8f1ac654029c60 100644 (file)
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -245,6 +244,7 @@ CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -252,11 +252,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -270,6 +265,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -286,9 +282,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -374,14 +367,13 @@ CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
 CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SRP_ATTRS=m
 CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
@@ -427,7 +419,6 @@ CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
 CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -481,14 +472,14 @@ CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
 CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
 CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
 CONFIG_FANOTIFY=y
@@ -574,7 +565,6 @@ CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -600,8 +590,13 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -609,7 +604,10 @@ CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_STRING_HELPERS=y
+CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
+CONFIG_TEST_BPF=m
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
@@ -673,12 +671,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
index 90f514baa37d0b091eda7e04d45d4be45be3cd67..219dca6ea92684485e56d68c4b6d04719217a4f2 100644 (file)
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -243,6 +242,7 @@ CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -250,11 +250,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -268,6 +263,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -284,9 +280,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -371,14 +364,13 @@ CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
 CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SRP_ATTRS=m
 CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
@@ -424,7 +416,6 @@ CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
 CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -478,13 +469,13 @@ CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
 CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
 CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
 CONFIG_FANOTIFY=y
@@ -626,12 +617,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
index 13559d32af696b9917d9c84678beea19f498461f..822c2f2e0c25d4adac076344e9ceafc971196082 100644 (file)
@@ -33,7 +33,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -241,6 +240,7 @@ CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -248,11 +248,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -266,6 +261,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -282,9 +278,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -369,14 +362,13 @@ CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
 CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SRP_ATTRS=m
 CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
@@ -422,7 +414,6 @@ CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
 CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -476,13 +467,13 @@ CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
 CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
 CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
 CONFIG_FANOTIFY=y
@@ -550,8 +541,11 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
@@ -618,12 +612,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
index e376789f2d8daa6ac407ca2c103d2f49be69ad6f..9d63051ebec42556cc545835cdb5cb6fe4d94f7a 100644 (file)
@@ -22,8 +22,8 @@ CONFIG_HZ_100=y
 CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SECCOMP is not set
-# CONFIG_IUCV is not set
 CONFIG_NET=y
+# CONFIG_IUCV is not set
 CONFIG_ATM=y
 CONFIG_ATM_LANE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -36,9 +36,9 @@ CONFIG_ENCLOSURE_SERVICES=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_ENCLOSURE=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SRP_ATTRS=y
 CONFIG_ZFCP=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -75,12 +75,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
 # CONFIG_STRICT_DEVMEM is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 # CONFIG_PFAULT is not set
 # CONFIG_S390_HYPFS_FS is not set
 # CONFIG_VIRTUALIZATION is not set
index fab35a8efa4f924ee2b69a3a3c9e32e73ebaa9a6..785c5f24d6f9e5c7b99d71a11b7eee9dc6e670e2 100644 (file)
@@ -92,10 +92,10 @@ CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_NETDEVICES=y
@@ -164,14 +164,13 @@ CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRCT10DIF=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
index 51d14fe5eb9a318349876eb376b359695770a0a2..ca1cabb3a96c1ba98062ce399803805cb9743858 100644 (file)
@@ -121,6 +121,8 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 {
        struct ftrace_graph_ent trace;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
index dd1c24ceda50245978c97e13c5b992db3139f329..3f51cf4e8f020e1b972eaa15ef4640ebe028e4e3 100644 (file)
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
         */
        local_irq_save(flags);
        local_mcck_disable();
-       /*
-        * Ummm... Does this make sense at all? Copying the percpu struct
-        * and then zapping it one statement later?
-        */
-       memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
-       memset(&mcck, 0, sizeof(struct mcck_struct));
+       mcck = *this_cpu_ptr(&cpu_mcck);
+       memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
        clear_cpu_flag(CIF_MCCK_PENDING);
        local_mcck_enable();
        local_irq_restore(flags);
index 48c2206a39561ea9373bb3fb8ec48297b3509867..5eec9afbb5b5852cdcbf2a50685a43030f3af5f7 100644 (file)
@@ -19,6 +19,7 @@
        .type  __kernel_clock_gettime,@function
 __kernel_clock_gettime:
        .cfi_startproc
+       ahi     %r15,-16
        basr    %r5,0
 0:     al      %r5,21f-0b(%r5)                 /* get &_vdso_data */
        chi     %r2,__CLOCK_REALTIME_COARSE
@@ -34,8 +35,8 @@ __kernel_clock_gettime:
 1:     l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     1b
-       stcke   24(%r15)                        /* Store TOD clock */
-       lm      %r0,%r1,25(%r15)
+       stcke   0(%r15)                         /* Store TOD clock */
+       lm      %r0,%r1,1(%r15)
        s       %r0,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,2f
@@ -70,6 +71,7 @@ __kernel_clock_gettime:
 8:     st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
        lhi     %r2,0
+       ahi     %r15,16
        br      %r14
 
        /* CLOCK_MONOTONIC_COARSE */
@@ -96,8 +98,8 @@ __kernel_clock_gettime:
 11:    l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     11b
-       stcke   24(%r15)                        /* Store TOD clock */
-       lm      %r0,%r1,25(%r15)
+       stcke   0(%r15)                         /* Store TOD clock */
+       lm      %r0,%r1,1(%r15)
        s       %r0,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,12f
@@ -132,11 +134,13 @@ __kernel_clock_gettime:
 17:    st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
        lhi     %r2,0
+       ahi     %r15,16
        br      %r14
 
        /* Fallback to system call */
 19:    lhi     %r1,__NR_clock_gettime
        svc     0
+       ahi     %r15,16
        br      %r14
 
 20:    .long   1000000000
index 60def5f562db532e4c014687e38dcefdf1344383..719de6186b206e17c60b973b1661e57cf1505747 100644 (file)
@@ -19,6 +19,7 @@
        .type  __kernel_gettimeofday,@function
 __kernel_gettimeofday:
        .cfi_startproc
+       ahi     %r15,-16
        basr    %r5,0
 0:     al      %r5,13f-0b(%r5)                 /* get &_vdso_data */
 1:     ltr     %r3,%r3                         /* check if tz is NULL */
@@ -29,30 +30,30 @@ __kernel_gettimeofday:
        l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     1b
-       stcke   24(%r15)                        /* Store TOD clock */
-       lm      %r0,%r1,25(%r15)
+       stcke   0(%r15)                         /* Store TOD clock */
+       lm      %r0,%r1,1(%r15)
        s       %r0,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,3f
        ahi     %r0,-1
 3:     ms      %r0,__VDSO_TK_MULT(%r5)         /*  * tk->mult */
-       st      %r0,24(%r15)
+       st      %r0,0(%r15)
        l       %r0,__VDSO_TK_MULT(%r5)
        ltr     %r1,%r1
        mr      %r0,%r0
        jnm     4f
        a       %r0,__VDSO_TK_MULT(%r5)
-4:     al      %r0,24(%r15)
+4:     al      %r0,0(%r15)
        al      %r0,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
        al      %r1,__VDSO_XTIME_NSEC+4(%r5)
        brc     12,5f
        ahi     %r0,1
-5:     mvc     24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+5:     mvc     0(4,%r15),__VDSO_XTIME_SEC+4(%r5)
        cl      %r4,__VDSO_UPD_COUNT+4(%r5)     /* check update counter */
        jne     1b
        l       %r4,__VDSO_TK_SHIFT(%r5)        /* Timekeeper shift */
        srdl    %r0,0(%r4)                      /*  >> tk->shift */
-       l       %r4,24(%r15)                    /* get tv_sec from stack */
+       l       %r4,0(%r15)                     /* get tv_sec from stack */
        basr    %r5,0
 6:     ltr     %r0,%r0
        jnz     7f
@@ -71,6 +72,7 @@ __kernel_gettimeofday:
 9:     srl     %r0,6
        st      %r0,4(%r2)                      /* store tv->tv_usec */
 10:    slr     %r2,%r2
+       ahi     %r15,16
        br      %r14
 11:    .long   1000000000
 12:    .long   274877907
index 9d9761f8e11035761c3b1143c6486a9fc52193c6..7699e735ae28ed726f4725ee97619119e1aa5e1e 100644 (file)
@@ -19,6 +19,7 @@
        .type  __kernel_clock_gettime,@function
 __kernel_clock_gettime:
        .cfi_startproc
+       aghi    %r15,-16
        larl    %r5,_vdso_data
        cghi    %r2,__CLOCK_REALTIME_COARSE
        je      4f
@@ -37,10 +38,10 @@ __kernel_clock_gettime:
 0:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     0b
-       stcke   48(%r15)                        /* Store TOD clock */
+       stcke   0(%r15)                         /* Store TOD clock */
        lgf     %r2,__VDSO_TK_SHIFT(%r5)        /* Timekeeper shift */
        lg      %r0,__VDSO_WTOM_SEC(%r5)
-       lg      %r1,49(%r15)
+       lg      %r1,1(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        msgf    %r1,__VDSO_TK_MULT(%r5)         /*  * tk->mult */
        alg     %r1,__VDSO_WTOM_NSEC(%r5)
@@ -56,6 +57,7 @@ __kernel_clock_gettime:
 2:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
        lghi    %r2,0
+       aghi    %r15,16
        br      %r14
 
        /* CLOCK_MONOTONIC_COARSE */
@@ -82,9 +84,9 @@ __kernel_clock_gettime:
 5:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     5b
-       stcke   48(%r15)                        /* Store TOD clock */
+       stcke   0(%r15)                         /* Store TOD clock */
        lgf     %r2,__VDSO_TK_SHIFT(%r5)        /* Timekeeper shift */
-       lg      %r1,49(%r15)
+       lg      %r1,1(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        msgf    %r1,__VDSO_TK_MULT(%r5)         /*  * tk->mult */
        alg     %r1,__VDSO_XTIME_NSEC(%r5)      /*  + tk->xtime_nsec */
@@ -101,6 +103,7 @@ __kernel_clock_gettime:
 7:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
        lghi    %r2,0
+       aghi    %r15,16
        br      %r14
 
        /* CLOCK_THREAD_CPUTIME_ID for this thread */
@@ -134,11 +137,13 @@ __kernel_clock_gettime:
        slgr    %r4,%r0                         /* r4 = tv_nsec */
        stg     %r4,8(%r3)
        lghi    %r2,0
+       aghi    %r15,16
        br      %r14
 
        /* Fallback to system call */
 12:    lghi    %r1,__NR_clock_gettime
        svc     0
+       aghi    %r15,16
        br      %r14
 
 13:    .quad   1000000000
index 7a344995a97fd21246de5554f09674449db8f2fd..6ce46707663cc9d8248e5feedb71a0268d9629f2 100644 (file)
@@ -19,6 +19,7 @@
        .type  __kernel_gettimeofday,@function
 __kernel_gettimeofday:
        .cfi_startproc
+       aghi    %r15,-16
        larl    %r5,_vdso_data
 0:     ltgr    %r3,%r3                         /* check if tz is NULL */
        je      1f
@@ -28,8 +29,8 @@ __kernel_gettimeofday:
        lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     0b
-       stcke   48(%r15)                        /* Store TOD clock */
-       lg      %r1,49(%r15)
+       stcke   0(%r15)                         /* Store TOD clock */
+       lg      %r1,1(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
        msgf    %r1,__VDSO_TK_MULT(%r5)         /*  * tk->mult */
        alg     %r1,__VDSO_XTIME_NSEC(%r5)      /*  + tk->xtime_nsec */
@@ -50,6 +51,7 @@ __kernel_gettimeofday:
        srlg    %r0,%r0,6
        stg     %r0,8(%r2)                      /* store tv->tv_usec */
 4:     lghi    %r2,0
+       aghi    %r15,16
        br      %r14
 5:     .quad   1000000000
        .long   274877907
index 416f2a323ba5e3a3d59a806a921751749ccb7a46..7f0089d9a4aa47ef86e7691502281c140ee637c3 100644 (file)
@@ -66,7 +66,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        clock = S390_lowcore.last_update_clock;
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+               "       stckf   %1"     /* Store current tod clock value */
+#else
                "       stck    %1"     /* Store current tod clock value */
+#endif
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
index 765c1776ec9fd6e900e8c23f39e55af0bf2b316b..0e69b7e7a439ce90e2f831c173fd67d55af53be0 100644 (file)
@@ -22,7 +22,7 @@
 
 int atomic_add_return(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
index 32c29a133f9d4b199f1a6a4476f274fd4c0b9890..d38b52dca216273a147328cc2d0288c20c2eeba6 100644 (file)
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
-       __asm__ __volatile__("swap [%2], %0"
-                            : "=&r" (val)
-                            : "0" (val), "r" (m)
-                            : "memory");
-       return val;
-}
-
+unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
        switch (size) {
        case 4:
-               return xchg_u32(ptr, x);
+               return __xchg_u32(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
index 5b1b52a04ad6283fb67308d9bf84b08494870140..7e064c68c5ec8a0ab538a15947d5c44b2db0a322 100644 (file)
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                                 enum dma_data_direction dir)
+{
+       /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
+        * routine can be a nop.
+        */
+}
+
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops *leon_dma_ops;
 extern struct dma_map_ops pci32_dma_ops;
index a34ad079487e85ea8e3dbc5b314f1235ccf7cc4d..4c7c12d69bea6b78f51efcec0a0b8c0b19372a4d 100644 (file)
@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
 {
        __u16 ret;
 
-       __asm__ __volatile__ ("lduha [%1] %2, %0"
+       __asm__ __volatile__ ("lduha [%2] %3, %0"
                              : "=r" (ret)
-                             : "r" (addr), "i" (ASI_PL));
+                             : "m" (*addr), "r" (addr), "i" (ASI_PL));
        return ret;
 }
 #define __arch_swab16p __arch_swab16p
@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
 {
        __u32 ret;
 
-       __asm__ __volatile__ ("lduwa [%1] %2, %0"
+       __asm__ __volatile__ ("lduwa [%2] %3, %0"
                              : "=r" (ret)
-                             : "r" (addr), "i" (ASI_PL));
+                             : "m" (*addr), "r" (addr), "i" (ASI_PL));
        return ret;
 }
 #define __arch_swab32p __arch_swab32p
@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
 {
        __u64 ret;
 
-       __asm__ __volatile__ ("ldxa [%1] %2, %0"
+       __asm__ __volatile__ ("ldxa [%2] %3, %0"
                              : "=r" (ret)
-                             : "r" (addr), "i" (ASI_PL));
+                             : "m" (*addr), "r" (addr), "i" (ASI_PL));
        return ret;
 }
 #define __arch_swab64p __arch_swab64p
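
The operand renumbering above is the whole point of the patch: adding "m" (*addr) as an input tells GCC that the asm actually reads the pointed-to object, so a store that initializes *addr can no longer be reordered past, or eliminated before, the load. A generic illustration (x86 mov used purely for demonstration; the sparc code additionally keeps the "r" (addr) operand because lduwa/ldxa need the raw address register alongside the ASI):

    static inline unsigned int load32(const unsigned int *p)
    {
            unsigned int ret;

            /* "m" (*p) both supplies an addressing mode and marks the
             * memory as consumed by the asm statement */
            asm("movl %1, %0" : "=r" (ret) : "m" (*p));
            return ret;
    }
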
index 8f76f23dac38ec66b0afea55a5311e612f2459f0..f9c6813c132d606c2a2d138342d4ed9941165f47 100644 (file)
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 {
        unsigned long csr_reg, csr, csr_error_bits;
        irqreturn_t ret = IRQ_NONE;
-       u16 stat;
+       u32 stat;
 
        csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
        csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                               pbm->name);
                ret = IRQ_HANDLED;
        }
-       pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+       pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
        if (stat & (PCI_STATUS_PARITY |
                    PCI_STATUS_SIG_TARGET_ABORT |
                    PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                    PCI_STATUS_SIG_SYSTEM_ERROR)) {
                printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
                       pbm->name, stat);
-               pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+               pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
                ret = IRQ_HANDLED;
        }
        return ret;
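
The config-space accessors are swapped because pbm->pci_bus->self is NULL for the host bridge's root bus on these controllers, so pci_read_config_word()/pci_write_config_word() would dereference a NULL device; going through the PBM's pci_ops with devfn 0 avoids that. stat is widened to u32 because struct pci_ops::read always stores into a u32 regardless of the access size. A hypothetical equivalent using the bus-level helpers, which likewise never touch bus->self:

    u16 status;

    pci_bus_read_config_word(pbm->pci_bus, PCI_DEVFN(0, 0), PCI_STATUS, &status);
    pci_bus_write_config_word(pbm->pci_bus, PCI_DEVFN(0, 0), PCI_STATUS, 0xffff);
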
index 302c476413d5c5eaeb29fd5cedd529f86de2d30f..da6f1a7fc4db4713425d1927af185cb797b4c1fc 100644 (file)
@@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu)
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_interrupt();
+       irq_exit();
 }
 
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_single_interrupt();
+       irq_exit();
 }
 
 static void tsb_sync(void *info)
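
Wrapping the generic cross-call handlers in irq_enter()/irq_exit() matters for two reasons: in_interrupt() becomes true while the callbacks run, and irq_exit() processes any softirqs the callbacks raised. The general shape, with handle_pending_work() as a placeholder:

    void example_ipi_handler(void)
    {
            irq_enter();            /* account and mark hardirq context    */
            handle_pending_work();  /* callbacks may rely on in_interrupt() */
            irq_exit();             /* runs pending softirqs on the way out */
    }
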
index a7c418ac26afbb46500ff812d939a8aefff27945..71cd65ab200c8739634447a995fb5e069cac3717 100644 (file)
@@ -45,6 +45,19 @@ ATOMIC_OP(add, +=)
 
 #undef ATOMIC_OP
 
+int atomic_xchg(atomic_t *v, int new)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(ATOMIC_HASH(v), flags);
+       ret = v->counter;
+       v->counter = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+       return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
        return (unsigned long)prev;
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+       unsigned long flags;
+       u32 prev;
+
+       spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       prev = *ptr;
+       *ptr = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+       return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
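
sparc32 has no compare-and-swap, so atomic_t operations are emulated with a small array of spinlocks indexed by a hash of the address. The new __xchg_u32()/atomic_xchg() join that scheme because an xchg built on the swap instruction (removed from cmpxchg.h above) is not atomic with respect to a cmpxchg built on the locks: each would bypass the other's serialization. A sketch of the hashing, to the best of my reading of arch/sparc/lib/atomic32.c:

    #define ATOMIC_HASH_SIZE        4
    #define ATOMIC_HASH(a)                                          \
            (&__atomic_hash[(((unsigned long)a) >> 8) &             \
                            (ATOMIC_HASH_SIZE - 1)])

    extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
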
index ded8a6774ac99a6b9fd6708e440f66bc2f6e7433..41a503c158626a870906de36516765869c40ae1d 100644 (file)
@@ -144,7 +144,7 @@ config INSTRUCTION_DECODER
 
 config PERF_EVENTS_INTEL_UNCORE
        def_bool y
-       depends on PERF_EVENTS && SUP_SUP_INTEL && PCI
+       depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
 
 config OUTPUT_FORMAT
        string
index 704f58aa79cd4853f91663512232a59b3441b4c8..45abc363dd3e44dac4b5ced56d28ad9b510241f3 100644 (file)
@@ -76,8 +76,10 @@ suffix-$(CONFIG_KERNEL_XZ)   := xz
 suffix-$(CONFIG_KERNEL_LZO)    := lzo
 suffix-$(CONFIG_KERNEL_LZ4)    := lz4
 
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
+            perl $(srctree)/arch/x86/tools/calc_run_size.pl)
 quiet_cmd_mkpiggy = MKPIGGY $@
-      cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
+      cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
 
 targets += piggy.S
 $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
index cbed1407a5cdb7ead8fa0cae90d2e7902133aaa7..1d7fbbcc196d6f8b661545130972453f109e27d7 100644 (file)
@@ -207,7 +207,8 @@ relocated:
  * Do the decompression, and jump to the new kernel..
  */
                                /* push arguments for decompress_kernel: */
-       pushl   $z_output_len   /* decompressed length */
+       pushl   $z_run_size     /* size of kernel with .bss and .brk */
+       pushl   $z_output_len   /* decompressed length, end of relocs */
        leal    z_extract_offset_negative(%ebx), %ebp
        pushl   %ebp            /* output address */
        pushl   $z_input_len    /* input_len */
@@ -217,7 +218,7 @@ relocated:
        pushl   %eax            /* heap area */
        pushl   %esi            /* real mode pointer */
        call    decompress_kernel /* returns kernel location in %eax */
-       addl    $24, %esp
+       addl    $28, %esp
 
 /*
  * Jump to the decompressed kernel.
index 2884e0c3e8a5880411ce63955b01c103e6eabd00..6b1766c6c08205f3bda8a527dff88097b48e77e3 100644 (file)
@@ -402,13 +402,16 @@ relocated:
  * Do the decompression, and jump to the new kernel..
  */
        pushq   %rsi                    /* Save the real mode argument */
+       movq    $z_run_size, %r9        /* size of kernel with .bss and .brk */
+       pushq   %r9
        movq    %rsi, %rdi              /* real mode address */
        leaq    boot_heap(%rip), %rsi   /* malloc area for uncompression */
        leaq    input_data(%rip), %rdx  /* input_data */
        movl    $z_input_len, %ecx      /* input_len */
        movq    %rbp, %r8               /* output target address */
-       movq    $z_output_len, %r9      /* decompressed length */
+       movq    $z_output_len, %r9      /* decompressed length, end of relocs */
        call    decompress_kernel       /* returns kernel location in %rax */
+       popq    %r9
        popq    %rsi
 
 /*
index 57ab74df7eeaa3eef4954b89f36d20b4c528afd7..30dd59a9f0b4acfd6a70587f6f4e247adbbeb8fd 100644 (file)
@@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned char *input_data,
                                  unsigned long input_len,
                                  unsigned char *output,
-                                 unsigned long output_len)
+                                 unsigned long output_len,
+                                 unsigned long run_size)
 {
        real_mode = rmode;
 
@@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
-       output = choose_kernel_location(input_data, input_len,
-                                       output, output_len);
+       /*
+        * The memory hole needed for the kernel is the larger of either
+        * the entire decompressed kernel plus relocation table, or the
+        * entire decompressed kernel plus .bss and .brk sections.
+        */
+       output = choose_kernel_location(input_data, input_len, output,
+                                       output_len > run_size ? output_len
+                                                             : run_size);
 
        /* Validate memory location choices. */
        if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
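
The sizing rule above is worth making concrete: choose_kernel_location() must reserve whichever is larger, the decompressed image plus its relocation table (output_len) or the decompressed image plus .bss and .brk (run_size), because both layouts occupy the hole at different times. With made-up numbers:

    /* illustrative values, not from any real build */
    unsigned long output_len = 24 << 20;    /* image + relocs: 24 MiB       */
    unsigned long run_size   = 26 << 20;    /* image + .bss + .brk: 26 MiB  */
    unsigned long hole = output_len > run_size ? output_len : run_size;
    /* hole == 26 MiB: the .bss/.brk demand wins here */
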
index b669ab65bf6cf2e6973d6dc50cb693a1a8568324..d8222f213182f120c6f1d4c43e9ac477b2c2ca24 100644 (file)
@@ -36,11 +36,13 @@ int main(int argc, char *argv[])
        uint32_t olen;
        long ilen;
        unsigned long offs;
+       unsigned long run_size;
        FILE *f = NULL;
        int retval = 1;
 
-       if (argc < 2) {
-               fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
+       if (argc < 3) {
+               fprintf(stderr, "Usage: %s compressed_file run_size\n",
+                               argv[0]);
                goto bail;
        }
 
@@ -74,6 +76,7 @@ int main(int argc, char *argv[])
        offs += olen >> 12;     /* Add 8 bytes for each 32K block */
        offs += 64*1024 + 128;  /* Add 64K + 128 bytes slack */
        offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
+       run_size = atoi(argv[2]);
 
        printf(".section \".rodata..compressed\",\"a\",@progbits\n");
        printf(".globl z_input_len\n");
@@ -85,6 +88,8 @@ int main(int argc, char *argv[])
        /* z_extract_offset_negative allows simplification of head_32.S */
        printf(".globl z_extract_offset_negative\n");
        printf("z_extract_offset_negative = -0x%lx\n", offs);
+       printf(".globl z_run_size\n");
+       printf("z_run_size = %lu\n", run_size);
 
        printf(".globl input_data, input_data_end\n");
        printf("input_data:\n");
index f48b17df42249e45cca9ef6de99bfd43083507cd..3a52ee0e726d4ca2643ff6b0dec4675f39e296b5 100644 (file)
@@ -20,7 +20,6 @@
 #define THREAD_SIZE_ORDER      1
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
 
-#define STACKFAULT_STACK 0
 #define DOUBLEFAULT_STACK 1
 #define NMI_STACK 0
 #define DEBUG_STACK 0
index 678205195ae118e16ca34609a24f472d9875e568..75450b2c7be48393607da8a5fdf050e663eb48c8 100644 (file)
 #define IRQ_STACK_ORDER 2
 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
 
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 2
+#define DEBUG_STACK 3
+#define MCE_STACK 4
+#define N_EXCEPTION_STACKS 4  /* hw limit: 7 */
 
 #define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
index 8cd27e08e23c47cec2f736b6b674b921554eb680..8cd1cc3bc8356ffef29e349f08b3de43aeb79506 100644 (file)
@@ -150,6 +150,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 }
 
 void cpu_disable_common(void);
+void cpu_die_common(unsigned int cpu);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
index 854053889d4d2d6f74cdb3143da4f6c63e213996..547e344a6dc60d7db27d43c74d44c783326291bb 100644 (file)
@@ -141,7 +141,7 @@ struct thread_info {
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK                                            \
        (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |       \
-        _TIF_USER_RETURN_NOTIFY)
+        _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
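
_TIF_DO_NOTIFY_MASK gates the slow exit-to-user path: only flags in the mask cause do_notify_resume() to run, so omitting _TIF_UPROBE meant a pending uprobe notification could be skipped until some other flag happened to be set. The shape of the check, simplified from the real exit code:

    if (thread_flags & _TIF_DO_NOTIFY_MASK)
            do_notify_resume(regs, thread_flags);
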
index bc8352e7010a9e805c54068da84b3848dcc12048..707adc6549d82335a20bdf18d18b697fa1fe9eab 100644 (file)
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
 
 #ifdef CONFIG_TRACING
 asmlinkage void trace_page_fault(void);
+#define trace_stack_segment stack_segment
 #define trace_divide_error divide_error
 #define trace_bounds bounds
 #define trace_invalid_op invalid_op
index 4b4f78c9ba1902ed87127738052135ed54071088..cfa9b5b2c27a0b72d794f4aa048deaa404e8baef 100644 (file)
@@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 static int __init x86_xsave_setup(char *s)
 {
+       if (strlen(s))
+               return 0;
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
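
__setup() handlers match by prefix and receive the remainder of the parameter string, so before this change booting with "noxsaves" or "noxsaveopt" would also be swallowed by the "noxsave" handler and disable XSAVE entirely. Returning 0 on a non-empty remainder tells the parser the parameter was not consumed, letting the more specific handler claim it. A minimal sketch; nofeature and disable_feature() are illustrative:

    static int __init nofeature_setup(char *s)
    {
            if (strlen(s))          /* trailing characters: not our parameter   */
                    return 0;       /* leave it for a longer-prefixed handler   */
            disable_feature();
            return 1;               /* consumed */
    }
    __setup("nofeature", nofeature_setup);
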
index 7aa1acc79789aa9c38de7509dcb018132a50d901..06674473b0e66736d0a64c235c1e9fe7e774deb9 100644 (file)
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  */
-static void apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
 {
        struct equiv_cpu_entry *eq;
        size_t *cont_sz;
        u32 *header;
        u8  *data, **cont;
+       u8 (*patch)[PATCH_MAX_SIZE];
        u16 eq_id = 0;
        int offset, left;
        u32 rev, eax, ebx, ecx, edx;
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
        new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
        cont_sz = (size_t *)__pa_nodebug(&container_size);
        cont    = (u8 **)__pa_nodebug(&container);
+       patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
        new_rev = &ucode_new_rev;
        cont_sz = &container_size;
        cont    = &container;
+       patch   = &amd_ucode_patch;
 #endif
 
        data   = ucode;
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
                                rev = mc->hdr.patch_id;
                                *new_rev = rev;
 
-                               /* save ucode patch */
-                               memcpy(amd_ucode_patch, mc,
-                                      min_t(u32, header[1], PATCH_MAX_SIZE));
+                               if (save_patch)
+                                       memcpy(patch, mc,
+                                              min_t(u32, header[1], PATCH_MAX_SIZE));
                        }
                }
 
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
        *data = cp.data;
        *size = cp.size;
 
-       apply_ucode_in_initrd(cp.data, cp.size);
+       apply_ucode_in_initrd(cp.data, cp.size, true);
 }
 
 #ifdef CONFIG_X86_32
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
        size_t *usize;
        void **ucode;
 
-       mc = (struct microcode_amd *)__pa(amd_ucode_patch);
+       mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
        if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
                __apply_microcode_amd(mc);
                return;
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
        if (!*ucode || !*usize)
                return;
 
-       apply_ucode_in_initrd(*ucode, *usize);
+       apply_ucode_in_initrd(*ucode, *usize, false);
 }
 
 static void __init collect_cpu_sig_on_bsp(void *arg)
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
                 * AP has a different equivalence ID than BSP, looks like
                 * mixed-steppings silicon so go through the ucode blob anew.
                 */
-               apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
+               apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
        }
 }
 #endif
@@ -347,7 +350,9 @@ void load_ucode_amd_ap(void)
 int __init save_microcode_in_initrd_amd(void)
 {
        unsigned long cont;
+       int retval = 0;
        enum ucode_state ret;
+       u8 *cont_va;
        u32 eax;
 
        if (!container)
@@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void)
 
 #ifdef CONFIG_X86_32
        get_bsp_sig();
-       cont = (unsigned long)container;
+       cont    = (unsigned long)container;
+       cont_va = __va(container);
 #else
        /*
         * We need the physical address of the container for both bitness since
         * boot_params.hdr.ramdisk_image is a physical address.
         */
-       cont = __pa(container);
+       cont    = __pa(container);
+       cont_va = container;
 #endif
 
        /*
@@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void)
        if (relocated_ramdisk)
                container = (u8 *)(__va(relocated_ramdisk) +
                             (cont - boot_params.hdr.ramdisk_image));
+       else
+               container = cont_va;
 
        if (ucode_new_rev)
                pr_info("microcode: updated early to new patch_level=0x%08x\n",
@@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
 
        ret = load_microcode_amd(eax, container, container_size);
        if (ret != UCODE_OK)
-               return -EINVAL;
+               retval = -EINVAL;
 
        /*
         * This will be freed any msec now, stash patches for the current
@@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
        container = NULL;
        container_size = 0;
 
-       return 0;
+       return retval;
 }
index dd9d6190b08dfae5fdaee714ab5f556040af516e..08fe6e8a726e5160e51b9c992da765e2fab29348 100644 (file)
@@ -465,6 +465,16 @@ static void mc_bp_resume(void)
 
        if (uci->valid && uci->mc)
                microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
+       else if (!uci->mc)
+               /*
+                * We might resume and not have applied late microcode but still
+                * have a newer patch stashed from the early loader. We don't
+                * have it in uci->mc so we have to load it the same way we're
+                * applying patches early on the APs.
+                */
+               load_ucode_ap();
+#endif
 }
 
 static struct syscore_ops mc_syscore_ops = {
index 5f28a64e71ea9e834bb835ed5fe4d8e7adaf472f..2c017f242a78144ba18499f52aa5ca0c069aa880 100644 (file)
@@ -124,7 +124,7 @@ void __init load_ucode_bsp(void)
 static bool check_loader_disabled_ap(void)
 {
 #ifdef CONFIG_X86_32
-       return __pa_nodebug(dis_ucode_ldr);
+       return *((bool *)__pa_nodebug(&dis_ucode_ldr));
 #else
        return dis_ucode_ldr;
 #endif
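
The one-liner above fixes a classic address-versus-value slip: on 32-bit the AP loader runs before paging is up, so globals must be reached through their physical alias, but the old code returned the physical address of dis_ucode_ldr itself, which is always non-zero, rather than the flag's value. The pattern, with an illustrative flag name:

    static bool flag;

    static bool read_flag_before_paging(void)
    {
            /* must go through the physical alias of &flag here */
            return *(bool *)__pa_nodebug(&flag);
    }
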
index adf138eac85c3384550e2f14ec5ac3377ed87000..f9ed429d6e4f83a00789ba8e5b81b704905c9764 100644 (file)
@@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
        .attrs = snbep_uncore_qpi_formats_attr,
 };
 
-#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                     \
-       .init_box       = snbep_uncore_msr_init_box,            \
+#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                   \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter
 
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                     \
+       __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
+       .init_box       = snbep_uncore_msr_init_box             \
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 };
@@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = {
        .format_group           = &hswep_uncore_cbox_format_group,
 };
 
+/*
+ * Write SBOX Initialization register bit by bit to avoid spurious #GPs
+ */
+static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
+{
+       unsigned msr = uncore_msr_box_ctl(box);
+
+       if (msr) {
+               u64 init = SNBEP_PMON_BOX_CTL_INT;
+               u64 flags = 0;
+               int i;
+
+               for_each_set_bit(i, (unsigned long *)&init, 64) {
+                       flags |= (1ULL << i);
+                       wrmsrl(msr, flags);
+               }
+       }
+}
+
+static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
+       __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+       .init_box               = hswep_uncore_sbox_msr_init_box
+};
+
 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = {
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
-       .ops                    = &snbep_uncore_msr_ops,
+       .ops                    = &hswep_uncore_sbox_msr_ops,
        .format_group           = &hswep_uncore_sbox_format_group,
 };
 
@@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = {
        SNBEP_UNCORE_PCI_COMMON_INIT(),
 };
 
+static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
+
+static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+       u64 count = 0;
+
+       pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
+       pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
+
+       return count;
+}
+
 static struct intel_uncore_ops hswep_uncore_irp_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
-       .read_counter   = ivbep_uncore_irp_read_counter,
+       .read_counter   = hswep_uncore_irp_read_counter,
 };
 
 static struct intel_uncore_type hswep_uncore_irp = {
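
hswep_uncore_irp_read_counter() assembles a 64-bit counter from two 32-bit config reads at hswep_uncore_irp_ctrs[hwc->idx] and that offset plus 4; storing through (u32 *)&count and (u32 *)&count + 1 places the halves correctly only on a little-endian host, which holds on x86 but is worth spelling out. An endian-neutral equivalent, with reg standing for the counter offset:

    u32 lo, hi;
    u64 count;

    pci_read_config_dword(pdev, reg, &lo);          /* low 32 bits  */
    pci_read_config_dword(pdev, reg + 4, &hi);      /* high 32 bits */
    count = ((u64)hi << 32) | lo;
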
index 1abcb50b48ae042fd06c2581802af0e1af7f49d5..ff86f19b575849fca7e20a4086e09f798ae8291d 100644 (file)
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
                [ DEBUG_STACK-1                 ]       = "#DB",
                [ NMI_STACK-1                   ]       = "NMI",
                [ DOUBLEFAULT_STACK-1           ]       = "#DF",
-               [ STACKFAULT_STACK-1            ]       = "#SS",
                [ MCE_STACK-1                   ]       = "#MC",
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
                [ N_EXCEPTION_STACKS ...
index 2e1a6853e00cec950bdd6e0f11586a57c062a05d..fe9f0b79a18b321bbc80711b7e71dde49b7436e5 100644 (file)
@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
        u32 (*base)(int num, int slot, int func, size_t size);
 };
 
+static size_t __init gen9_stolen_size(int num, int slot, int func)
+{
+       u16 gmch_ctrl;
+
+       gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+       gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+       gmch_ctrl &= BDW_GMCH_GMS_MASK;
+
+       if (gmch_ctrl < 0xf0)
+               return gmch_ctrl << 25; /* 32 MB units */
+       else
+               /* 4MB increments starting at 0xf0 for 4MB */
+               return (gmch_ctrl - 0xf0 + 1) << 22;
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
 static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
        .base = i830_stolen_base,
        .size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
        .size = gen8_stolen_size,
 };
 
+static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
+       .base = intel_stolen_base,
+       .size = gen9_stolen_size,
+};
+
 static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
        .base = intel_stolen_base,
        .size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
        INTEL_BDW_M_IDS(&gen8_stolen_funcs),
        INTEL_BDW_D_IDS(&gen8_stolen_funcs),
        INTEL_CHV_IDS(&chv_stolen_funcs),
+       INTEL_SKL_IDS(&gen9_stolen_funcs),
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
index df088bb03fb3ffec9148c7cc44cb65ef1aa36118..c0226ab541061870bb590a9c817d0770e618697c 100644 (file)
@@ -828,9 +828,15 @@ ENTRY(native_iret)
        jnz native_irq_return_ldt
 #endif
 
+.global native_irq_return_iret
 native_irq_return_iret:
+       /*
+        * This may fault.  Non-paranoid faults on return to userspace are
+        * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
+        * Double-faults due to espfix64 are handled in do_double_fault.
+        * Other faults here are fatal.
+        */
        iretq
-       _ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
@@ -858,25 +864,6 @@ native_irq_return_ldt:
        jmp native_irq_return_iret
 #endif
 
-       .section .fixup,"ax"
-bad_iret:
-       /*
-        * The iret traps when the %cs or %ss being restored is bogus.
-        * We've lost the original trap vector and error code.
-        * #GPF is the most likely one to get for an invalid selector.
-        * So pretend we completed the iret and took the #GPF in user mode.
-        *
-        * We are now running with the kernel GS after exception recovery.
-        * But error_entry expects us to have user GS to match the user %cs,
-        * so swap back.
-        */
-       pushq $0
-
-       SWAPGS
-       jmp general_protection
-
-       .previous
-
        /* edi: workmask, edx: work */
 retint_careful:
        CFI_RESTORE_STATE
@@ -922,37 +909,6 @@ ENTRY(retint_kernel)
        CFI_ENDPROC
 END(common_interrupt)
 
-       /*
-        * If IRET takes a fault on the espfix stack, then we
-        * end up promoting it to a doublefault.  In that case,
-        * modify the stack to make it look like we just entered
-        * the #GP handler from user space, similar to bad_iret.
-        */
-#ifdef CONFIG_X86_ESPFIX64
-       ALIGN
-__do_double_fault:
-       XCPT_FRAME 1 RDI+8
-       movq RSP(%rdi),%rax             /* Trap on the espfix stack? */
-       sarq $PGDIR_SHIFT,%rax
-       cmpl $ESPFIX_PGD_ENTRY,%eax
-       jne do_double_fault             /* No, just deliver the fault */
-       cmpl $__KERNEL_CS,CS(%rdi)
-       jne do_double_fault
-       movq RIP(%rdi),%rax
-       cmpq $native_irq_return_iret,%rax
-       jne do_double_fault             /* This shouldn't happen... */
-       movq PER_CPU_VAR(kernel_stack),%rax
-       subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
-       movq %rax,RSP(%rdi)
-       movq $0,(%rax)                  /* Missing (lost) #GP error code */
-       movq $general_protection,RIP(%rdi)
-       retq
-       CFI_ENDPROC
-END(__do_double_fault)
-#else
-# define __do_double_fault do_double_fault
-#endif
-
 /*
  * APIC interrupts.
  */
@@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0
 idtentry bounds do_bounds has_error_code=0
 idtentry invalid_op do_invalid_op has_error_code=0
 idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault __do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=1
 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
 idtentry invalid_TSS do_invalid_TSS has_error_code=1
 idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 
 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
 idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1
+idtentry stack_segment do_stack_segment has_error_code=1
 #ifdef CONFIG_XEN
 idtentry xen_debug do_debug has_error_code=0
 idtentry xen_int3 do_int3 has_error_code=0
@@ -1399,17 +1355,16 @@ error_sti:
 
 /*
  * There are two places in the kernel that can potentially fault with
- * usergs. Handle them here. The exception handlers after iret run with
- * kernel gs again, so don't set the user space flag. B stepping K8s
- * sometimes report an truncated RIP for IRET exceptions returning to
- * compat mode. Check for these here too.
+ * usergs. Handle them here.  B stepping K8s sometimes report a
+ * truncated RIP for IRET exceptions returning to compat mode. Check
+ * for these here too.
  */
 error_kernelspace:
        CFI_REL_OFFSET rcx, RCX+8
        incl %ebx
        leaq native_irq_return_iret(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
-       je error_swapgs
+       je error_bad_iret
        movl %ecx,%eax  /* zero extend */
        cmpq %rax,RIP+8(%rsp)
        je bstep_iret
@@ -1420,7 +1375,15 @@ error_kernelspace:
 bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
-       jmp error_swapgs
+       /* fall through */
+
+error_bad_iret:
+       SWAPGS
+       mov %rsp,%rdi
+       call fixup_bad_iret
+       mov %rax,%rsp
+       decl %ebx       /* Return to usergs */
+       jmp error_sti
        CFI_ENDPROC
 END(error_entry)
 
index 749b0e423419fee7b97698e725dc6d665c1aa828..e510618b2e91a7969bb8cf6c74a35f59e4bf1bea 100644 (file)
@@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
         */
        if (work & _TIF_NOHZ) {
                user_exit();
-               work &= ~TIF_NOHZ;
+               work &= ~_TIF_NOHZ;
        }
 
 #ifdef CONFIG_SECCOMP
index 4d2128ac70bdab90afe6a753f090996f30f72ff1..668d8f2a8781ea11fab5110315a946171b0e274d 100644 (file)
@@ -1303,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu)
        numa_remove_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct completion, die_complete);
+
 void cpu_disable_common(void)
 {
        int cpu = smp_processor_id();
 
+       init_completion(&per_cpu(die_complete, smp_processor_id()));
+
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
@@ -1316,8 +1320,6 @@ void cpu_disable_common(void)
        fixup_irqs();
 }
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 int native_cpu_disable(void)
 {
        int ret;
@@ -1327,16 +1329,21 @@ int native_cpu_disable(void)
                return ret;
 
        clear_local_APIC();
-       init_completion(&per_cpu(die_complete, smp_processor_id()));
        cpu_disable_common();
 
        return 0;
 }
 
+void cpu_die_common(unsigned int cpu)
+{
+       wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+}
+
 void native_cpu_die(unsigned int cpu)
 {
        /* We don't do anything here: idle task is faking death itself. */
-       wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+
+       cpu_die_common(cpu);
 
        /* They ack this in play_dead() by setting CPU_DEAD */
        if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
index 0d0e922fafc149400b4320c793e1d311c96d147b..de801f22128a6b183aa5ab21ac3a3cd158af5610 100644 (file)
@@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",              invalid_op)
 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
 DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",              invalid_TSS)
 DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",      segment_not_present)
-#ifdef CONFIG_X86_32
 DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",            stack_segment)
-#endif
 DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",          alignment_check)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-{
-       enum ctx_state prev_state;
-
-       prev_state = exception_enter();
-       if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-                      X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
-               preempt_conditional_sti(regs);
-               do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
-               preempt_conditional_cli(regs);
-       }
-       exception_exit(prev_state);
-}
-
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 {
        static const char str[] = "double fault";
        struct task_struct *tsk = current;
 
+#ifdef CONFIG_X86_ESPFIX64
+       extern unsigned char native_irq_return_iret[];
+
+       /*
+        * If IRET takes a non-IST fault on the espfix64 stack, then we
+        * end up promoting it to a doublefault.  In that case, modify
+        * the stack to make it look like we just entered the #GP
+        * handler from user space, similar to bad_iret.
+        */
+       if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+               regs->cs == __KERNEL_CS &&
+               regs->ip == (unsigned long)native_irq_return_iret)
+       {
+               struct pt_regs *normal_regs = task_pt_regs(current);
+
+               /* Fake a #GP(0) from userspace. */
+               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
+               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               regs->ip = (unsigned long)general_protection;
+               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               return;
+       }
+#endif
+
        exception_enter();
        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
+
+struct bad_iret_stack {
+       void *error_entry_ret;
+       struct pt_regs regs;
+};
+
+asmlinkage __visible
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+{
+       /*
+        * This is called from entry_64.S early in handling a fault
+        * caused by a bad iret to user mode.  To handle the fault
+        * correctly, we want move our stack frame to task_pt_regs
+        * and we want to pretend that the exception came from the
+        * iret target.
+        */
+       struct bad_iret_stack *new_stack =
+               container_of(task_pt_regs(current),
+                            struct bad_iret_stack, regs);
+
+       /* Copy the IRET target to the new stack. */
+       memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+
+       /* Copy the remainder of the stack from the current stack. */
+       memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+
+       BUG_ON(!user_mode_vm(&new_stack->regs));
+       return new_stack;
+}
 #endif
 
 /*
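
fixup_bad_iret() grafts two regions onto task_pt_regs: first the five-word hardware IRET frame from the faulting stack (s->regs.sp points at it), then everything the entry code had saved up to regs.ip from the current stack; memmove() is used because the source and destination stacks may overlap. The 5*8 bytes are the tail of struct pt_regs, in this order (iret_frame is an illustrative name, the kernel simply reuses the pt_regs tail):

    struct iret_frame {             /* what memmove(&regs.ip, sp, 5*8) moves */
            unsigned long ip;       /* RIP    */
            unsigned long cs;       /* CS     */
            unsigned long flags;    /* RFLAGS */
            unsigned long sp;       /* RSP    */
            unsigned long ss;       /* SS     */
    };
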
@@ -778,7 +815,7 @@ void __init trap_init(void)
        set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, invalid_TSS);
        set_intr_gate(X86_TRAP_NP, segment_not_present);
-       set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+       set_intr_gate(X86_TRAP_SS, stack_segment);
        set_intr_gate(X86_TRAP_GP, general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, coprocessor_error);
index 5edf088ca51e11789d6d26e3bfbdad16c1104220..9f8a2faf50407b6212f71a5de81aea8680855f87 100644 (file)
@@ -4287,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                fetch_register_operand(op);
                break;
        case OpCL:
+               op->type = OP_IMM;
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
@@ -4294,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
+               op->type = OP_IMM;
                op->bytes = 1;
                op->val = 1;
                break;
@@ -4352,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
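
The repeated op->type = OP_IMM additions all close the same hole: these operand kinds previously left op->type unset, and the emulator's write-back stage dispatches on that field, so a stale OP_REG or OP_MEM could trigger a bogus register or memory write. OP_IMM is never written back. A simplified sketch of the dispatch, with enum values illustrative:

    enum op_type { OP_NONE, OP_REG, OP_MEM, OP_IMM };

    static void writeback(enum op_type type)
    {
            switch (type) {
            case OP_REG:    /* commit to a guest register */        break;
            case OP_MEM:    /* commit to guest memory */            break;
            default:        /* OP_IMM, OP_NONE: nothing to store */ break;
            }
    }
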
index ac1c4de3a48491d9b0cf939897e9af57238b3f71..978f402006eef21ee569720a0d573a6a48e12c97 100644 (file)
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
         * kvm mmu, before reclaiming the page, we should
         * unmap it from mmu first.
         */
-       WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+       WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-                       kvm_is_mmio_pfn(pfn));
+                       kvm_is_reserved_pfn(pfn));
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
         * here.
         */
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
            PageTransCompound(pfn_to_page(pfn)) &&
            !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
index 7609e0e421ece96f6671cc7d2310e0c59402ac1b..1318f75d56e4f072885276a18ca00363f074fcd4 100644 (file)
@@ -41,9 +41,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
                while (((unsigned long)src & 6) && len >= 2) {
                        __u16 val16;
 
-                       *errp = __get_user(val16, (const __u16 __user *)src);
-                       if (*errp)
-                               return isum;
+                       if (__get_user(val16, (const __u16 __user *)src))
+                               goto out_err;
 
                        *(__u16 *)dst = val16;
                        isum = (__force __wsum)add32_with_carry(
index 4cb8763868fc20add0018a26df9752f1cd03937d..4e5dfec750fc9296e726a213b8e097f4f885fb70 100644 (file)
@@ -1123,7 +1123,7 @@ void mark_rodata_ro(void)
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
-       unsigned long all_end = PFN_ALIGN(&_end);
+       unsigned long all_end;
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
@@ -1134,7 +1134,16 @@ void mark_rodata_ro(void)
        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
+        *
+        * We align all_end to PMD_SIZE because the existing mapping
+        * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead,
+        * we would split the PMD and the remainder between _brk_end
+        * and the end of the PMD would remain mapped executable.
+        *
+        * Any PMD which was setup after the one which covers _brk_end
+        * has been zapped already via cleanup_highmem().
         */
+       all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
 
        rodata_test();
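
A worked example of the new rounding, with a made-up address: given PMD_SIZE = 2 MiB and _brk_end = 0xffffffff82b53000, rounding up yields the end of the 2 MiB mapping that already covers _brk_end, so set_memory_nx() never has to split that PMD:

    unsigned long all_end = roundup(0xffffffff82b53000UL, 1UL << 21);
    /* all_end == 0xffffffff82c00000UL: the 2 MiB boundary above _brk_end */
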
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
new file mode 100644 (file)
index 0000000..23210ba
--- /dev/null
@@ -0,0 +1,39 @@
+#!/usr/bin/perl
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | perl calc_run_size.pl
+use strict;
+
+my $mem_size = 0;
+my $file_offset = 0;
+
+my $sections=" *[0-9]+ \.(?:bss|brk) +";
+while (<>) {
+       if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
+               my $size = hex($1);
+               my $offset = hex($2);
+               $mem_size += $size;
+               if ($file_offset == 0) {
+                       $file_offset = $offset;
+               } elsif ($file_offset != $offset) {
+                       # BFD linker shows the same file offset in ELF.
+                       # Gold linker shows them as consecutive.
+                       next if ($file_offset + $mem_size == $offset + $size);
+
+                       printf STDERR "file_offset: 0x%lx\n", $file_offset;
+                       printf STDERR "mem_size: 0x%lx\n", $mem_size;
+                       printf STDERR "offset: 0x%lx\n", $offset;
+                       printf STDERR "size: 0x%lx\n", $size;
+
+                       die ".bss and .brk are non-contiguous\n";
+               }
+       }
+}
+
+if ($file_offset == 0) {
+       die "Never found .bss or .brk file offset\n";
+}
+printf("%d\n", $mem_size + $file_offset);
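
To see what the script computes, suppose objdump reports .bss with size 0x170000 at file offset 0x1000000 and .brk with size 0x27000 at offset 0x1170000 (the consecutive layout gold emits; BFD would report 0x1000000 for both). The contiguity check passes because 0x1000000 + (0x170000 + 0x27000) equals 0x1170000 + 0x27000, and the script prints file_offset + mem_size = 0x1197000 bytes as the run size. These numbers are invented for illustration.
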
index 8650cdb53209d0f298fa219ac617454cea0fa087..4c071aeb8417bb419a45632c7d9f9a5c96bf3e76 100644 (file)
@@ -510,6 +510,9 @@ static void xen_cpu_die(unsigned int cpu)
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
+
+       cpu_die_common(cpu);
+
        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
index 49c6c3d9444916e0dc727d51c96349e92c424d6a..81f57e8c8f1be91f6cffea3b157b6bead7a541e1 100644 (file)
@@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105
 
 config XTENSA_PLATFORM_XTFPGA
        bool "XTFPGA"
+       select ETHOC if ETHERNET
        select SERIAL_CONSOLE
-       select ETHOC
        select XTENSA_CALIBRATE_CCOUNT
        help
          XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
@@ -367,7 +367,7 @@ config BUILTIN_DTB
 config BLK_DEV_SIMDISK
        tristate "Host file-based simulated block device support"
        default n
-       depends on XTENSA_PLATFORM_ISS
+       depends on XTENSA_PLATFORM_ISS && BLOCK
        help
          Create block devices that map to files in the host file system.
          Device binding to host file may be changed at runtime via proc
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644 (file)
index 0000000..249822b
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+       compatible = "cdns,xtensa-lx200";
+       memory@0 {
+               device_type = "memory";
+               reg = <0x00000000 0x06000000>;
+       };
+       pic: pic {
+               compatible = "cdns,xtensa-mx";
+               #interrupt-cells = <2>;
+               interrupt-controller;
+       };
+};
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644 (file)
index 0000000..f4b7b38
--- /dev/null
@@ -0,0 +1,131 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644 (file)
index 0000000..22eeacb
--- /dev/null
@@ -0,0 +1,135 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="lx200mx"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
index b2173e5da601cbe57303f9060faacb0dde0310cf..0383aed5912111b6a0b2cd4d0c58f758a95b4a7e 100644 (file)
@@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
 static inline pte_t pte_mkspecial(pte_t pte)
        { return pte; }
 
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
index 8883fc877c5c93334cacfd29ffe6a12300bb0961..db5bb72e2f4eda85f6ad5fe127be9796c8862ad9 100644 (file)
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root                        175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount                            176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff                           177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync                              178
@@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3)
 #define __NR_renameat2                         336
 __SYSCALL(336, sys_renameat2, 5)
 
-#define __NR_syscall_count                     337
+#define __NR_seccomp                           337
+__SYSCALL(337, sys_seccomp, 3)
+#define __NR_getrandom                         338
+__SYSCALL(338, sys_getrandom, 3)
+#define __NR_memfd_create                      339
+__SYSCALL(339, sys_memfd_create, 2)
+
+#define __NR_syscall_count                     340
 
 /*
  * sysxtensa syscall handler
index 0984232e429fb61d427c90340fa0c77e55c1fee2..5cbd5d9ea61dd52969d9c55313dbf703f3c4d24f 100644 (file)
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_iter iter;
-       struct bio_vec *bv;
+       struct bvec_iter bviter;
+       struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       unsigned int i, ret = 0;
+       unsigned int ret = 0;
        void *prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
 
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
        iter.seed = bip_get_seed(bip);
        iter.prot_buf = prot_buf;
 
-       bio_for_each_segment_all(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page);
+       bio_for_each_segment(bv, bio, bviter) {
+               void *kaddr = kmap_atomic(bv.bv_page);
 
-               iter.data_buf = kaddr + bv->bv_offset;
-               iter.data_size = bv->bv_len;
+               iter.data_buf = kaddr + bv.bv_offset;
+               iter.data_size = bv.bv_len;
 
                ret = proc_fn(&iter);
                if (ret) {
index b3ac40aef46b317c5a432a92ba9b40c8e1942504..89b97b5e0881853054c0807c5607c20d91d90762 100644 (file)
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-                       &q->queue_flags);
-       bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+       unsigned short seg_cnt;
+
+       /* estimate segment number by bi_vcnt for non-cloned bio */
+       if (bio_flagged(bio, BIO_CLONED))
+               seg_cnt = bio_segments(bio);
+       else
+               seg_cnt = bio->bi_vcnt;
 
-       if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-                       merge_not_need)
-               bio->bi_phys_segments = bio->bi_vcnt;
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                       (seg_cnt < queue_max_segments(q)))
+               bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
-               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-                               no_sg_merge && merge_not_need);
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }
 
index 68929bad9a6a4048151f485cc1e44db2d655fc0f..1d016fc9a8b640c54ce7e06e9f1ce1f293b694e6 100644 (file)
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_queues(q, false);
        }
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       blk_mq_freeze_queue_start(q);
+       blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-       blk_mq_freeze_queue(q);
+       WARN_ON_ONCE(!q->mq_freeze_depth);
 
        blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
        blk_mq_map_swqueue(q);
 
        blk_mq_sysfs_register(q);
-
-       blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                return NOTIFY_OK;
 
        mutex_lock(&all_q_mutex);
+
+       /*
+        * We need to freeze and reinit all existing queues.  Freezing
+        * involves a synchronous wait for an RCU grace period and doing it
+        * one by one may take a long time.  Start freezing all queues in
+        * one swoop and then wait for the completions so that freezing can
+        * take place in parallel.
+        */
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_freeze_queue_start(q);
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_freeze_queue_wait(q);
+
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_queue_reinit(q);
+
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_unfreeze_queue(q);
+
        mutex_unlock(&all_q_mutex);
        return NOTIFY_OK;
 }
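The refactor above splits blk_mq_freeze_queue() into a start phase and a wait phase so the CPU-notifier path can kick off every freeze before waiting on any of them; each wait spans an RCU grace period, so batching turns N sequential grace periods into roughly one. A minimal userspace model of the two-phase pattern (stand-in types, not the real blk-mq structures):

/* Two-phase freeze: start everything, then wait for everything. */
#include <stdio.h>

struct request_queue { int frozen; };

static void freeze_queue_start(struct request_queue *q)
{
	/* kicks off the (asynchronous) freeze and returns immediately */
	q->frozen = 1;
}

static void freeze_queue_wait(struct request_queue *q)
{
	/* in the kernel this blocks on mq_freeze_wq until the percpu
	 * ref drains; here the state has already settled */
	while (!q->frozen)
		;
}

int main(void)
{
	struct request_queue qs[3] = { {0}, {0}, {0} };
	int i;

	/* start all freezes first so the grace-period waits overlap... */
	for (i = 0; i < 3; i++)
		freeze_queue_start(&qs[i]);
	/* ...then wait on each: total cost ~ one grace period, not three */
	for (i = 0; i < 3; i++)
		freeze_queue_wait(&qs[i]);
	printf("all %d queues frozen\n", i);
	return 0;
}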
index e50170ca7c33f446acc16e29a0d0097828919c30..31666c92b46af29919f42ea3e1093caed7127d71 100644 (file)
@@ -157,14 +157,16 @@ out:
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-       unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-       unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+       unsigned short aclass;
+       unsigned short bclass;
 
-       if (aclass == IOPRIO_CLASS_NONE)
-               aclass = IOPRIO_CLASS_BE;
-       if (bclass == IOPRIO_CLASS_NONE)
-               bclass = IOPRIO_CLASS_BE;
+       if (!ioprio_valid(aprio))
+               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+       if (!ioprio_valid(bprio))
+               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+       aclass = IOPRIO_PRIO_CLASS(aprio);
+       bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
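The rewrite normalizes an invalid priority to (IOPRIO_CLASS_BE, IOPRIO_NORM) before the class is extracted, so the data bits are defaulted along with the class. A self-contained demo of the packing involved; the macros restate the layout of include/linux/ioprio.h (a 3-bit class above a 13-bit data field) for illustration:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)	((mask) & ((1 << IOPRIO_CLASS_SHIFT) - 1))
#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
#define IOPRIO_NORM	4
#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)

int main(void)
{
	unsigned short aprio = 0;	/* unset: class NONE, data 0 */

	/* normalize first, then extract -- the order the fix enforces */
	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	printf("class=%d data=%d\n",
	       IOPRIO_PRIO_CLASS(aprio), IOPRIO_PRIO_DATA(aprio));
	return 0;	/* prints class=2 data=4 */
}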
index 1e053d911240b577df324912bb68af13e6174bcc..b0c2a616c8f9b859191c075526c473ea1df796bc 100644 (file)
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
-               goto error;
+               goto error_free_buffer;
        }
        blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        }
        
 error:
+       blk_put_request(rq);
+
+error_free_buffer:
        kfree(buffer);
-       if (rq)
-               blk_put_request(rq);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
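The error path gains a second label because a blk_get_request() failure leaves rq as an ERR_PTR, which the old `if (rq) blk_put_request(rq)` would happily pass along. A sketch of the staged-unwind idiom, with plain allocations standing in for the block-layer calls:

#include <stdlib.h>

static int do_io(size_t in_len)
{
	int err = 0;
	char *buffer;
	void *rq;

	buffer = malloc(in_len ? in_len : 1);
	if (!buffer)
		return -12;		/* -ENOMEM */

	rq = malloc(64);		/* stands in for blk_get_request() */
	if (!rq) {
		err = -12;
		goto error_free_buffer;	/* rq never valid: skip its put */
	}

	/* ... set up and issue the request; success and late failures
	 * both fall through the full unwind below ... */

	free(rq);			/* stands in for blk_put_request() */
error_free_buffer:
	free(buffer);
	return err;
}

int main(void) { return do_io(16) ? 1 : 0; }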
index ed122e17636e32298129a8e28429ef86cb27a4fa..7556e7c4a055cd0c2dad68fa11a3f8746e6ab16e 100644 (file)
@@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Vostro 3546",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 143ec6ea1468109a745bce176b2f15d3aaaced19..7db19316076659b493ab1d1c9a295dc88450381d 100644 (file)
@@ -878,7 +878,7 @@ int acpi_dev_suspend_late(struct device *dev)
                return 0;
 
        target_state = acpi_target_system_state();
-       wakeup = device_may_wakeup(dev);
+       wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
        error = acpi_device_wakeup(adev, target_state, wakeup);
        if (wakeup && error)
                return error;
index 807a88a0f394f8a639cbc3f6e2b78073078986cc..9d75ead2a1f9107e92bf7f6408348add2cd71a7f 100644 (file)
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
                return true;
 
        for (i = 0; i < video->attached_count; i++) {
-               if (video->attached_array[i].bind_info == device)
+               if ((video->attached_array[i].value.int_val & 0xfff) ==
+                   (device->device_id & 0xfff))
                        return true;
        }
 
index 5f039f1910677ff45d59cb4f7c0812f84f1dac23..49f1e6890587e0b7f8970fe2dbdb7d879da92871 100644 (file)
@@ -60,6 +60,7 @@ enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
        board_ahci_ign_iferr,
+       board_ahci_nomsi,
        board_ahci_noncq,
        board_ahci_nosntf,
        board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_nomsi] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_NO_MSI),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_noncq] = {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ),
                .flags          = AHCI_FLAG_COMMON,
@@ -313,6 +321,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -475,10 +491,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
        /*
-        * Samsung SSDs found on some macbooks.  NCQ times out.
-        * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+        * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+        * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
-       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+       { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -514,12 +531,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
-       unsigned int force_port_map = 0;
-       unsigned int mask_port_map = 0;
-
        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
                dev_info(&pdev->dev, "JMB361 has only one port\n");
-               force_port_map = 1;
+               hpriv->force_port_map = 1;
        }
 
        /*
@@ -529,9 +543,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
         */
        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                if (pdev->device == 0x6121)
-                       mask_port_map = 0x3;
+                       hpriv->mask_port_map = 0x3;
                else
-                       mask_port_map = 0xf;
+                       hpriv->mask_port_map = 0xf;
                dev_info(&pdev->dev,
                          "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
        }
index 5eb61c9e63da95cbd24a54ee0604262288c8a9f6..97683e45ab043be5045ae22945a5520e845cfe4f 100644 (file)
@@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
        }
 }
 
-static void ahci_update_intr_status(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
-       struct ahci_port_priv *pp = ap->private_data;
        u32 status;
 
        status = readl(port_mmio + PORT_IRQ_STAT);
        writel(status, port_mmio + PORT_IRQ_STAT);
 
-       atomic_or(status, &pp->intr_status);
+       ahci_handle_port_interrupt(ap, port_mmio, status);
 }
 
 static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
@@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
-{
-       struct ata_host *host = dev_instance;
-       struct ahci_host_priv *hpriv = host->private_data;
-       u32 irq_masked = hpriv->port_map;
-       unsigned int i;
-
-       for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap;
-
-               if (!(irq_masked & (1 << i)))
-                       continue;
-
-               ap = host->ports[i];
-               if (ap) {
-                       ahci_port_thread_fn(irq, ap);
-                       VPRINTK("port %u\n", i);
-               } else {
-                       VPRINTK("port %u (no irq)\n", i);
-                       if (ata_ratelimit())
-                               dev_warn(host->dev,
-                                        "interrupt on disabled port %u\n", i);
-               }
-       }
-
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance)
 {
        struct ata_port *ap = dev_instance;
@@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
        irq_masked = irq_stat & hpriv->port_map;
 
+       spin_lock(&host->lock);
+
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;
 
@@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
                ap = host->ports[i];
                if (ap) {
-                       ahci_update_intr_status(ap);
+                       ahci_port_intr(ap);
                        VPRINTK("port %u\n", i);
                } else {
                        VPRINTK("port %u (no irq)\n", i);
@@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
         */
        writel(irq_stat, mmio + HOST_IRQ_STAT);
 
+       spin_unlock(&host->lock);
+
        VPRINTK("EXIT\n");
 
-       return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+       return IRQ_RETVAL(handled);
 }
 
 unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
@@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap)
         */
        pp->intr_mask = DEF_PORT_IRQ;
 
-       spin_lock_init(&pp->lock);
-       ap->lock = &pp->lock;
+       /*
+        * Switch to per-port locking when each port has its own MSI vector.
+        */
+       if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+               spin_lock_init(&pp->lock);
+               ap->lock = &pp->lock;
+       }
 
        ap->private_data = pp;
 
@@ -2482,31 +2462,6 @@ out_free_irqs:
        return rc;
 }
 
-static int ahci_host_activate_single_irq(struct ata_host *host, int irq,
-                                        struct scsi_host_template *sht)
-{
-       int i, rc;
-
-       rc = ata_host_start(host);
-       if (rc)
-               return rc;
-
-       rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr,
-                                      ahci_thread_fn, IRQF_SHARED,
-                                      dev_driver_string(host->dev), host);
-       if (rc)
-               return rc;
-
-       for (i = 0; i < host->n_ports; i++)
-               ata_port_desc(host->ports[i], "irq %d", irq);
-
-       rc = ata_host_register(host, sht);
-       if (rc)
-               devm_free_irq(host->dev, irq, host);
-
-       return rc;
-}
-
 /**
  *     ahci_host_activate - start AHCI host, request IRQs and register it
  *     @host: target ATA host
@@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq,
        if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
                rc = ahci_host_activate_multi_irqs(host, irq, sht);
        else
-               rc = ahci_host_activate_single_irq(host, irq, sht);
+               rc = ata_host_activate(host, irq, ahci_single_irq_intr,
+                                      IRQF_SHARED, sht);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
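The single-IRQ threaded handler is dropped in favor of a plain ata_host_activate() handler that takes host->lock itself, and per-port locks are installed only when AHCI_HFLAG_MULTI_MSI gives each port a private vector. A userspace model of that conditional lock selection (pthread stand-ins, not the libata types):

#include <pthread.h>
#include <stdbool.h>

struct host { pthread_mutex_t lock; };
struct port {
	pthread_mutex_t private_lock;
	pthread_mutex_t *lock;		/* the lock the fast path takes */
};

static void port_init(struct port *p, struct host *h, bool multi_msi)
{
	if (multi_msi) {
		/* private IRQ vector per port: ports cannot race each
		 * other, so each one may lock independently */
		pthread_mutex_init(&p->private_lock, NULL);
		p->lock = &p->private_lock;
	} else {
		/* one shared interrupt: serialize on the host lock */
		p->lock = &h->lock;
	}
}

int main(void)
{
	struct host h = { PTHREAD_MUTEX_INITIALIZER };
	struct port p;

	port_init(&p, &h, false);
	pthread_mutex_lock(p.lock);	/* takes h.lock in this mode */
	pthread_mutex_unlock(p.lock);
	return 0;
}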
index 07bc7e4dbd04b836722d42b1051dc3da33f99a9d..65071591b143f5b0914b3ba37c8a0ac8f16f01fb 100644 (file)
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->csr_base = csr_base;
 
        irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (irq < 0) {
+       if (!irq) {
                dev_err(&ofdev->dev, "invalid irq from platform\n");
                goto error_exit_with_cleanup;
        }
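The check changes from `irq < 0` to `!irq` because irq_of_parse_and_map() signals failure by returning 0, never a negative errno. A self-contained analogue of that contract:

#include <stdio.h>

/* stands in for irq_of_parse_and_map(): 0 means "no mapping",
 * a negative errno is never returned */
static unsigned int parse_and_map(int present)
{
	return present ? 42u : 0u;
}

int main(void)
{
	unsigned int irq = parse_and_map(0);

	/* the old test, `if (irq < 0)`, was dead code twice over:
	 * the value is unsigned and failure is signalled by 0 */
	if (!irq)
		puts("invalid irq from platform");
	return 0;
}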
index 61eb6d77dac7f507403578a283eade305378f566..ea1fbc1d4c5f1134d05e69a8e9d322c3f54d0789 100644 (file)
 enum sata_rcar_type {
        RCAR_GEN1_SATA,
        RCAR_GEN2_SATA,
+       RCAR_R8A7790_ES1_SATA,
 };
 
 struct sata_rcar_priv {
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
        ap->udma_mask   = ATA_UDMA6;
        ap->flags       |= ATA_FLAG_SATA;
 
+       if (priv->type == RCAR_R8A7790_ES1_SATA)
+               ap->flags       |= ATA_FLAG_NO_DIPM;
+
        ioaddr->cmd_addr = base + SDATA_REG;
        ioaddr->ctl_addr = base + SSDEVCON_REG;
        ioaddr->scr_addr = base + SCRSSTS_REG;
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
                sata_rcar_gen1_phy_init(priv);
                break;
        case RCAR_GEN2_SATA:
+       case RCAR_R8A7790_ES1_SATA:
                sata_rcar_gen2_phy_init(priv);
                break;
        default:
@@ -837,10 +842,18 @@ static struct of_device_id sata_rcar_match[] = {
                .compatible = "renesas,sata-r8a7790",
                .data = (void *)RCAR_GEN2_SATA
        },
+       {
+               .compatible = "renesas,sata-r8a7790-es1",
+               .data = (void *)RCAR_R8A7790_ES1_SATA
+       },
        {
                .compatible = "renesas,sata-r8a7791",
                .data = (void *)RCAR_GEN2_SATA
        },
+       {
+               .compatible = "renesas,sata-r8a7793",
+               .data = (void *)RCAR_GEN2_SATA
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = {
        { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
        { "sata-r8a7779", RCAR_GEN1_SATA },
        { "sata-r8a7790", RCAR_GEN2_SATA },
+       { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
        { "sata-r8a7791", RCAR_GEN2_SATA },
+       { "sata-r8a7793", RCAR_GEN2_SATA },
        { },
 };
 MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
index 7652e8dc188f93036e03a23a99ac7aee3b543811..21b0bc6a9c969ea677630a827f69c45545a9e78a 100644 (file)
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
        card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
        if (!card->config_regs) {
                dev_warn(&dev->dev, "Failed to ioremap config registers\n");
+               err = -ENOMEM;
                goto out_release_regions;
        }
        card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
        if (!card->buffers) {
                dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
+               err = -ENOMEM;
                goto out_unmap_config;
        }
 
index 61a33f4ba608cdc2fe01f691f46f92b2aba1d3e7..df04227d00cfae6fd729e32e5a28d354c501331f 100644 (file)
@@ -171,20 +171,23 @@ config WANT_DEV_COREDUMP
          Drivers should "select" this option if they desire to use the
          device coredump mechanism.
 
-config DISABLE_DEV_COREDUMP
-       bool "Disable device coredump" if EXPERT
+config ALLOW_DEV_COREDUMP
+       bool "Allow device coredump" if EXPERT
+       default y
        help
-         Disable the device coredump mechanism despite drivers wanting to
-         use it; this allows for more sensitive systems or systems that
-         don't want to ever access the information to not have the code,
-         nor keep any data.
+         This option controls whether the device coredump mechanism is
+         available; when disabled, the mechanism is omitted even if drivers
+         that can use it are enabled.
+         Say 'N' on sensitive systems, or on systems that never want to
+         access this information and prefer to carry neither the code nor
+         any collected data.
 
-         If unsure, say N.
+         If unsure, say Y.
 
 config DEV_COREDUMP
        bool
        default y if WANT_DEV_COREDUMP
-       depends on !DISABLE_DEV_COREDUMP
+       depends on ALLOW_DEV_COREDUMP
 
 config DEBUG_DRIVER
        bool "Driver Core verbose debug messages"
index 14d162952c3bc21bdadf3966ccef0c7b2c43df91..842d04707de6b04dc6eec9f5aab9acb8885d235a 100644 (file)
@@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
        return &dir->kobj;
 }
 
+static DEFINE_MUTEX(gdp_mutex);
 
 static struct kobject *get_device_parent(struct device *dev,
                                         struct device *parent)
 {
        if (dev->class) {
-               static DEFINE_MUTEX(gdp_mutex);
                struct kobject *kobj = NULL;
                struct kobject *parent_kobj;
                struct kobject *k;
@@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
            glue_dir->kset != &dev->class->p->glue_dirs)
                return;
 
+       mutex_lock(&gdp_mutex);
        kobject_put(glue_dir);
+       mutex_unlock(&gdp_mutex);
 }
 
 static void cleanup_device_parent(struct device *dev)
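Hoisting gdp_mutex to file scope lets cleanup_glue_dir() take the same lock that get_device_parent() already held over its find-or-create, closing the race between glue-directory creation and teardown. A minimal model of the widened lock (pthread stand-ins for the kobject machinery):

#include <pthread.h>

static pthread_mutex_t gdp_mutex = PTHREAD_MUTEX_INITIALIZER;
static int glue_dir_refs;

static void get_glue_dir(void)
{
	pthread_mutex_lock(&gdp_mutex);
	glue_dir_refs++;		/* find-or-create under the lock */
	pthread_mutex_unlock(&gdp_mutex);
}

static void put_glue_dir(void)
{
	/* before the fix this ran unlocked and could race get_glue_dir() */
	pthread_mutex_lock(&gdp_mutex);
	glue_dir_refs--;
	pthread_mutex_unlock(&gdp_mutex);
}

int main(void)
{
	get_glue_dir();
	put_glue_dir();
	return glue_dir_refs;
}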
index 40bc2f4072cc28ea4138ae36b3b08cb96f2ed158..fb83d4acd400ef0c9e87fddf92f7c7f4da174487 100644 (file)
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
        struct device *dev = pdd->dev;
        int ret = 0;
 
-       if (gpd_data->need_restore)
+       if (gpd_data->need_restore > 0)
                return 0;
 
+       /*
+        * If the value of the need_restore flag is still unknown at this point,
+        * we trust that pm_genpd_poweroff() has verified that the device is
+        * already runtime PM suspended.
+        */
+       if (gpd_data->need_restore < 0) {
+               gpd_data->need_restore = 1;
+               return 0;
+       }
+
        mutex_unlock(&genpd->lock);
 
        genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
        mutex_lock(&genpd->lock);
 
        if (!ret)
-               gpd_data->need_restore = true;
+               gpd_data->need_restore = 1;
 
        return ret;
 }
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
-       bool need_restore = gpd_data->need_restore;
+       int need_restore = gpd_data->need_restore;
 
-       gpd_data->need_restore = false;
+       gpd_data->need_restore = 0;
        mutex_unlock(&genpd->lock);
 
        genpd_start_dev(genpd, dev);
+
+       /*
+        * Call genpd_restore_dev() for recently added devices too (need_restore
+        * is negative then).
+        */
        if (need_restore)
                genpd_restore_dev(genpd, dev);
 
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
        struct generic_pm_domain *genpd;
+       struct generic_pm_domain_data *gpd_data;
        bool (*stop_ok)(struct device *__dev);
        int ret;
 
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
                return 0;
 
        mutex_lock(&genpd->lock);
+
+       /*
+        * If the state of the need_restore flag is unknown, then none of
+        * the runtime PM callbacks has been invoked yet. Update the flag
+        * to reflect that the current state is active.
+        */
+       gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+       if (gpd_data->need_restore < 0)
+               gpd_data->need_restore = 0;
+
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        spin_unlock_irq(&dev->power.lock);
 
        if (genpd->attach_dev)
-               genpd->attach_dev(dev);
+               genpd->attach_dev(genpd, dev);
 
        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-       gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+       gpd_data->need_restore = -1;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
        genpd->max_off_time_changed = true;
 
        if (genpd->detach_dev)
-               genpd->detach_dev(dev);
+               genpd->detach_dev(genpd, dev);
 
        spin_lock_irq(&dev->power.lock);
 
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
 
        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
-               to_gpd_data(psd->domain_data)->need_restore = val;
+               to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
 
        spin_unlock_irqrestore(&dev->power.lock, flags);
 }
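Across these hunks the flag becomes a tristate: -1 for a freshly added device whose state is unknown, 0 for active with nothing to restore, 1 for saved state that must be restored. A compact sketch of that protocol, with stub types in place of the genpd structures:

/* -1 = unknown (just attached), 0 = active, 1 = saved/needs restore */
enum { NR_UNKNOWN = -1, NR_ACTIVE = 0, NR_SAVED = 1 };

struct dev_state { int need_restore; };

static void save_device(struct dev_state *d)
{
	if (d->need_restore > 0)
		return;			/* already saved */
	if (d->need_restore < 0) {
		/* no callback has run yet: trust that the device is
		 * already runtime suspended and just mark it saved */
		d->need_restore = NR_SAVED;
		return;
	}
	/* ... invoke the save callback ... */
	d->need_restore = NR_SAVED;
}

static void restore_device(struct dev_state *d)
{
	int need_restore = d->need_restore;

	d->need_restore = NR_ACTIVE;
	if (need_restore)		/* true for 1 and for -1 (new device) */
		/* ... invoke the restore callback ... */ ;
}

int main(void)
{
	struct dev_state d = { NR_UNKNOWN };

	save_device(&d);		/* marks saved without a callback */
	restore_device(&d);		/* restores; state back to active */
	return d.need_restore;		/* 0 */
}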
index 0a54c588e433751f39084459193a5da7700d34db..27b71a0b72d0959e63fe2ea3efaeaaaa627a4e5d 100644 (file)
@@ -342,7 +342,6 @@ struct rbd_device {
 
        struct list_head        rq_queue;       /* incoming rq queue */
        spinlock_t              lock;           /* queue, flags, open_count */
-       struct workqueue_struct *rq_wq;
        struct work_struct      rq_work;
 
        struct rbd_image_header header;
@@ -402,6 +401,8 @@ static struct kmem_cache    *rbd_segment_name_cache;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
        }
 
        if (queued)
-               queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+               queue_work(rbd_wq, &rbd_dev->rq_work);
 }
 
 /*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
        page_count = (u32) calc_pages_for(offset, length);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages))
-               ret = PTR_ERR(pages);
+               return PTR_ERR(pages);
 
        ret = -ENOMEM;
        obj_request = rbd_obj_request_create(object_name, offset, length,
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-       rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-                                        rbd_dev->disk->disk_name);
-       if (!rbd_dev->rq_wq) {
-               ret = -ENOMEM;
-               goto err_out_mapping;
-       }
-
        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
-               goto err_out_workqueue;
+               goto err_out_mapping;
 
        /* Everything's ready.  Announce the disk to the world. */
 
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
        return ret;
 
-err_out_workqueue:
-       destroy_workqueue(rbd_dev->rq_wq);
-       rbd_dev->rq_wq = NULL;
 err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
 {
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-       destroy_workqueue(rbd_dev->rq_wq);
        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
        if (rc)
                return rc;
 
+       /*
+        * The number of active work items is limited by the number of
+        * rbd devices, so leave @max_active at default.
+        */
+       rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+       if (!rbd_wq) {
+               rc = -ENOMEM;
+               goto err_out_slab;
+       }
+
        if (single_major) {
                rbd_major = register_blkdev(0, RBD_DRV_NAME);
                if (rbd_major < 0) {
                        rc = rbd_major;
-                       goto err_out_slab;
+                       goto err_out_wq;
                }
        }
 
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
 err_out_blkdev:
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+       destroy_workqueue(rbd_wq);
 err_out_slab:
        rbd_slab_exit();
        return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
        rbd_sysfs_cleanup();
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
+       destroy_workqueue(rbd_wq);
        rbd_slab_exit();
 }
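With one shared WQ_MEM_RECLAIM workqueue replacing the per-device ones, module init must allocate it before any user and module exit must mirror the teardown, hence the new err_out_wq unwind label. A self-contained model of that init/exit symmetry (malloc/free standing in for the workqueue calls):

#include <stdlib.h>

static void *rbd_wq;			/* stands in for the workqueue */

static int rbd_init_model(int register_fails)
{
	int rc = 0;

	rbd_wq = malloc(1);		/* alloc_workqueue(..., WQ_MEM_RECLAIM, 0) */
	if (!rbd_wq)
		return -12;		/* -ENOMEM */

	if (register_fails) {
		rc = -19;
		goto err_out_wq;	/* the new unwind label */
	}
	return 0;

err_out_wq:
	free(rbd_wq);			/* destroy_workqueue(rbd_wq) */
	rbd_wq = NULL;
	return rc;
}

static void rbd_exit_model(void)
{
	free(rbd_wq);			/* exit mirrors init */
	rbd_wq = NULL;
}

int main(void)
{
	if (rbd_init_model(0) == 0)
		rbd_exit_model();
	return 0;
}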
 
index 2ad0b5bce44be89494d9cb5f75da19b9e459cd10..3920ee45aa5942dd816a775180eeb16f662e804c 100644 (file)
@@ -560,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
+               if (user_mem)
+                       kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
index 9a024f899dd40a040ccede1571e1e2919e61bea9..f3334829e55a3e6ac393a348c6da0e7aef159709 100644 (file)
@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
                __free_pages(page, 2);
                return NULL;
        }
-       get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
 }
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
                return;
 
        set_pages_wb(page, 4);
-       put_page(page);
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
 }
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
-       get_page(page);
        set_pages_uc(page, 1);
 
        if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
        set_pages_wb(intel_private.scratch_page, 1);
        pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
 }
 
index 6226aa08c36af59b9406b5a278880537e16ac9a4..bcf86f91800a2c1810791a4e3a508d490de0d8f9 100644 (file)
 #include <asm/vio.h>
 
 
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
+       u64 buffer[PLPAR_HCALL_BUFSIZE];
+       size_t size = max < 8 ? max : 8;
        int rc;
 
-       rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+       rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
        if (rc != H_SUCCESS) {
                pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
                return -EIO;
        }
+       memcpy(data, buffer, size);
 
        /* The hypervisor interface returns 64 bits */
-       return 8;
+       return size;
 }
 
 /**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 
 static struct hwrng pseries_rng = {
        .name           = KBUILD_MODNAME,
-       .data_read      = pseries_rng_data_read,
+       .read           = pseries_rng_read,
 };
 
 static int __init pseries_rng_probe(struct vio_dev *dev,
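The old .data_read hook always wrote a full 8 bytes through the caller's pointer; the new .read bounces the fixed-size hypercall result through a local buffer and copies out at most `max` bytes. A userspace sketch of that bounce-buffer pattern, with a stubbed entropy source in place of the hypervisor call:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* stands in for plpar_hcall(H_RANDOM, ...): fills a fixed buffer */
static int backend_fill(unsigned long *buf)
{
	buf[0] = 0x89abcdefUL;
	return 0;
}

static int rng_read(void *data, size_t max)
{
	unsigned long buffer[4] = { 0 };
	size_t size = max < 8 ? max : 8;	/* backend yields 8 bytes */

	if (backend_fill(buffer))
		return -5;			/* -EIO */
	memcpy(data, buffer, size);		/* never overruns the caller */
	return (int)size;
}

int main(void)
{
	unsigned char out[3];

	printf("copied %d bytes\n", rng_read(out, sizeof(out)));	/* 3 */
	return 0;
}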
index bfa640023e64893251d2cb2cf54af5a853d89568..cf7a561fad7cd9f3cdd94fe16e5c22d65318428c 100644 (file)
@@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id)
        spin_lock_init(&port->outvq_lock);
        init_waitqueue_head(&port->waitqueue);
 
-       virtio_device_ready(portdev->vdev);
-
        /* Fill the in_vq with buffers so the host can send us data. */
        nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
        if (!nr_added_bufs) {
@@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev)
        spin_lock_init(&portdev->ports_lock);
        INIT_LIST_HEAD(&portdev->ports);
 
+       virtio_device_ready(portdev->vdev);
+
        if (multiport) {
                unsigned int nr_added_bufs;
 
index 24b5b020753a9e4a66a5d3db7c8f7ad8bee0b928..a23ac0c724f014643e66bc2485f7c79cef523920 100644 (file)
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
 
        tmp = pmc_read(pmc, AT91_PMC_USB);
        usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
-       return parent_rate / (usbdiv + 1);
+
+       return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
 }
 
 static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
                                          unsigned long *parent_rate)
 {
        unsigned long div;
-       unsigned long bestrate;
-       unsigned long tmp;
+
+       if (!rate)
+               return -EINVAL;
 
        if (rate >= *parent_rate)
                return *parent_rate;
 
-       div = *parent_rate / rate;
-       if (div >= SAM9X5_USB_MAX_DIV)
-               return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
-
-       bestrate = *parent_rate / div;
-       tmp = *parent_rate / (div + 1);
-       if (bestrate - rate > rate - tmp)
-               bestrate = tmp;
+       div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1)
+               div = SAM9X5_USB_MAX_DIV + 1;
 
-       return bestrate;
+       return DIV_ROUND_CLOSEST(*parent_rate, div);
 }
 
 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 tmp;
        struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
        struct at91_pmc *pmc = usb->pmc;
-       unsigned long div = parent_rate / rate;
+       unsigned long div;
+
+       if (!rate)
+               return -EINVAL;
 
-       if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
                return -EINVAL;
 
        tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
 
                tmp_parent_rate = rate * usb->divisors[i];
                tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
-               tmprate = tmp_parent_rate / usb->divisors[i];
+               tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
                if (tmprate < rate)
                        tmpdiff = rate - tmprate;
                else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        struct at91_pmc *pmc = usb->pmc;
        unsigned long div;
 
-       if (!rate || parent_rate % rate)
+       if (!rate)
                return -EINVAL;
 
-       div = parent_rate / rate;
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
        for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
                if (usb->divisors[i] == div) {
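Every exact-division test in these hunks becomes DIV_ROUND_CLOSEST(), so round_rate() picks the divider whose output is nearest the request instead of failing on inexact rates. A compilable demo of the selection, using an unsigned-only simplification of the kernel macro and taking the divider field maximum as 15 for illustration:

#include <stdio.h>

/* unsigned-only simplification of the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define MAX_DIV			15	/* assumed hardware field maximum */

static long round_rate(unsigned long rate, unsigned long parent_rate)
{
	unsigned long div;

	if (!rate)
		return -22;		/* -EINVAL */
	if (rate >= parent_rate)
		return parent_rate;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > MAX_DIV + 1)
		div = MAX_DIV + 1;	/* clamp to what the register holds */

	return DIV_ROUND_CLOSEST(parent_rate, div);
}

int main(void)
{
	printf("%ld\n", round_rate(17000000, 48000000));	/* 16000000 */
	return 0;
}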
index 18a9de29df0e0c31dadd3de0b2bdb2485fab2733..c0a842b335c520c6c28f08308a1b62a743038dd3 100644 (file)
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider);
+               bestdiv = _get_div(divider, bestdiv);
+               return bestdiv;
+       }
+
        maxdiv = _get_maxdiv(divider);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_divider_ops);
 
-const struct clk_ops clk_divider_ro_ops = {
-       .recalc_rate = clk_divider_recalc_rate,
-};
-EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-
 static struct clk *_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        }
 
        init.name = name;
-       if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
-               init.ops = &clk_divider_ro_ops;
-       else
-               init.ops = &clk_divider_ops;
+       init.ops = &clk_divider_ops;
        init.flags = flags | CLK_IS_BASIC;
        init.parent_names = (parent_name ? &parent_name: NULL);
        init.num_parents = (parent_name ? 1 : 0);
index b345cc791e5defdeeb57d0b8df4d566bd41aef2c..88b9fe13fa444b2a81a3bd8a2588b035357d0048 100644 (file)
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        l  = ccsr & CCSR_L_MASK;
 
        if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        if (osc_forced)
                return PXA_MEM_13Mhz;
        if (a)
index dab988ab8cf12740ac931c5f5efaa39b90887ec3..157139a5c1ca956d76d1be30dfb6687f82d01816 100644 (file)
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
        [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
        [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
        [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
-       [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+       [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
        [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
        [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
        [VDP_CLK_SRC] = &vdp_clk_src.clkr,
index 1e68bff481b8e32ec440959002a2467287c269da..880a266f01431b3b9e7040565d3a3e81f0716a8b 100644 (file)
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
                div->width = div_width;
                div->lock = lock;
                div->table = div_table;
-               div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
-                                               ? &clk_divider_ro_ops
-                                               : &clk_divider_ops;
+               div_ops = &clk_divider_ops;
        }
 
        clk = clk_register_composite(NULL, name, parent_names, num_parents,
index efb17c3ee120e5ee28fa05099c4c3c7ce09f0ac1..f4a9c0058b4d677382863a12bf887b40202f63fe 100644 (file)
@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node)
        /* Make sure timer is stopped before playing with interrupts */
        sun4i_clkevt_time_stop(0);
 
+       sun4i_clockevent.cpumask = cpu_possible_mask;
+       sun4i_clockevent.irq = irq;
+
+       clockevents_config_and_register(&sun4i_clockevent, rate,
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
        ret = setup_irq(irq, &sun4i_timer_irq);
        if (ret)
                pr_warn("failed to setup irq %d\n", irq);
@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node)
        /* Enable timer0 interrupt */
        val = readl(timer_base + TIMER_IRQ_EN_REG);
        writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
-
-       sun4i_clockevent.cpumask = cpu_possible_mask;
-       sun4i_clockevent.irq = irq;
-
-       clockevents_config_and_register(&sun4i_clockevent, rate,
-                                       TIMER_SYNC_TICKS, 0xffffffff);
 }
 CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
                       sun4i_timer_init);
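The hunk reorders init so clockevents_config_and_register() runs before setup_irq() and the interrupt-enable write; otherwise a pending timer interrupt could fire into a half-initialized clockevent. The ordering, modeled with stub types:

struct clockevent { int registered; int irq_enabled; };

static struct clockevent evt;

static void timer_interrupt(void)
{
	/* by the time this can fire, the clockevent is registered */
	if (evt.registered)
		/* ... tick handling ... */ ;
}

static void timer_init(void)
{
	evt.registered = 1;	/* clockevents_config_and_register() first */
	evt.irq_enabled = 1;	/* setup_irq() + IRQ-enable write second */
}

int main(void)
{
	timer_init();
	timer_interrupt();
	return !(evt.registered && evt.irq_enabled);
}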
index 23aaf40cf37f5b6b7e5416a0125e62d581864079..f657c571b18e4e6baaa52250640aac4e1fa26267 100644 (file)
@@ -166,8 +166,8 @@ try_again:
                if (ret == -EPROBE_DEFER)
                        dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
                else
-                       dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
-                               cpu);
+                       dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+                               ret);
        } else {
                *cdev = cpu_dev;
                *creg = cpu_reg;
index 644b54e1e7d13e8ebc863f9bb8c694278d999227..4473eba1d6b0b6084f632a8cb25e7c7cd815170b 100644 (file)
@@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       policy->governor = NULL;
+       if (policy)
+               policy->governor = NULL;
 
        return policy;
 }
index 871703c49d2c09923d162af7651518d46a5ff3a4..e1eaf4ff9762646acac020c85c77bb122c570f0e 100644 (file)
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
        u32 *desc;
        struct split_key_result result;
        dma_addr_t dma_addr_in, dma_addr_out;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
-               return -ENOMEM;
+               return ret;
        }
 
-       init_job_desc(desc, 0);
-
        dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, dma_addr_in)) {
                dev_err(jrdev, "unable to map key input memory\n");
-               kfree(desc);
-               return -ENOMEM;
+               goto out_free;
        }
+
+       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+                                     DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, dma_addr_out)) {
+               dev_err(jrdev, "unable to map key output memory\n");
+               goto out_unmap_in;
+       }
+
+       init_job_desc(desc, 0);
        append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 
        /* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
         * FIFO_STORE with the explicit split-key content store
         * (0x26 output type)
         */
-       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
-                                     DMA_FROM_DEVICE);
-       if (dma_mapping_error(jrdev, dma_addr_out)) {
-               dev_err(jrdev, "unable to map key output memory\n");
-               kfree(desc);
-               return -ENOMEM;
-       }
        append_fifo_store(desc, dma_addr_out, split_key_len,
                          LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
        dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
                         DMA_FROM_DEVICE);
+out_unmap_in:
        dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
        kfree(desc);
-
        return ret;
 }
 EXPORT_SYMBOL(gen_split_key);
index 9282381b03ced19b0e890a10e51421224276be69..fe7b3f06f6e62ac9ab1e35a9bfb6a2c3f267c35a 100644 (file)
@@ -198,8 +198,7 @@ struct adf_accel_dev {
        struct dentry *debugfs_dir;
        struct list_head list;
        struct module *owner;
-       uint8_t accel_id;
-       uint8_t numa_node;
        struct adf_accel_pci accel_pci_dev;
+       uint8_t accel_id;
 } __packed;
 #endif
index 5f3fa45348b46c6c5f22093c7e5426ac43c4ec6d..9dd2cb72a4e862e8203b9a0ffdc109ce4480c0be 100644 (file)
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
-                       ring->inflights = kzalloc_node(sizeof(atomic_t),
-                                                      GFP_KERNEL,
-                                                      accel_dev->numa_node);
+                       ring->inflights =
+                               kzalloc_node(sizeof(atomic_t),
+                                            GFP_KERNEL,
+                                            dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
        int i, ret;
 
        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-                               accel_dev->numa_node);
+                               dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;
 
        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
-       etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+                                      dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
index f2e2f158cfbecec1bbef49a5b061d824c2365b3e..9e9619cd4a79b958dae8e6ceee84d0d267325322 100644 (file)
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(!n))
                return -EINVAL;
 
-       bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+       bufl = kmalloc_node(sz, GFP_ATOMIC,
+                           dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;
 
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                goto err;
 
        for_each_sg(assoc, sg, assoc_n, i) {
+               if (!sg->length)
+                       continue;
                bufl->bufers[bufs].addr = dma_map_single(dev,
                                                         sg_virt(sg),
                                                         sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                struct qat_alg_buf *bufers;
 
                buflout = kmalloc_node(sz, GFP_ATOMIC,
-                                      inst->accel_dev->numa_node);
+                                      dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
index 0d59bcb50de151c9355705ff39d18b9f600eb313..828f2a686aab26d474592829de8ac47f8f83c4fe 100644 (file)
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 
        list_for_each(itr, adf_devmgr_get_head()) {
                accel_dev = list_entry(itr, struct adf_accel_dev, list);
-               if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+               if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+                       dev_to_node(&GET_DEV(accel_dev)) < 0)
+                               && adf_dev_started(accel_dev))
                        break;
                accel_dev = NULL;
        }
        if (!accel_dev) {
-               pr_err("QAT: Could not find device on give node\n");
+               pr_err("QAT: Could not find device on node %d\n", node);
                accel_dev = adf_devmgr_get_first();
        }
        if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
        for (i = 0; i < num_inst; i++) {
                inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   accel_dev->numa_node);
+                                   dev_to_node(&GET_DEV(accel_dev)));
                if (!inst)
                        goto err;
 
index 978d6c56639df105ffce07b9037d0556329c02c6..53c491b59f07c59ec9b4264b215078a8fe368be8 100644 (file)
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
        uint64_t reg_val;
 
        admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            accel_dev->numa_node);
+                            dev_to_node(&GET_DEV(accel_dev)));
        if (!admin)
                return -ENOMEM;
        admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
index 0d0435a41be996d239517e3325a36631802bb9d2..948f66be262b31eeeb51418696a3e482eb82736c 100644 (file)
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
        kfree(accel_dev);
 }
 
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
-       unsigned int bus_per_cpu = 0;
-       struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
-       if (!c->phys_proc_id)
-               return 0;
-
-       bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
-       if (bus_per_cpu != 0)
-               return pdev->bus->number / bus_per_cpu;
-       return 0;
-}
-
 static int qat_dev_start(struct adf_accel_dev *accel_dev)
 {
        int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        void __iomem *pmisc_bar_addr = NULL;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       uint8_t node;
        int ret;
 
        switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENODEV;
        }
 
-       node = adf_get_dev_node_id(pdev);
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory,
+                * there is no point in using it, since remote memory
+                * transactions will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
        if (!accel_dev)
                return -ENOMEM;
 
-       accel_dev->numa_node = node;
        INIT_LIST_HEAD(&accel_dev->crypto_list);
 
        /* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        accel_dev->owner = THIS_MODULE;
        /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
        if (!hw_data) {
                ret = -ENOMEM;
                goto out_err;
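All of the driver's node-affine allocations now derive the node from dev_to_node() on the PCI device, instance lookup tolerates a device with no affinity (node -1), and probe rejects a multi-node system whose device reports no local memory. A sketch of those two policies, with plain ints standing in for the kernel helpers:

#define NUMA_NO_NODE	(-1)

/* mirrors qat_crypto_get_instance_node(): accept a started device on
 * the requested node, or one that reports no node affinity at all */
static int device_matches(int dev_node, int requested_node, int started)
{
	return started && (dev_node == requested_node || dev_node < 0);
}

/* mirrors the probe-time check: on a multi-node box a device with no
 * local memory would only ever do slow remote transactions */
static int validate_probe(int nr_nodes, int dev_node)
{
	if (nr_nodes > 1 && dev_node == NUMA_NO_NODE)
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	if (!device_matches(NUMA_NO_NODE, 1, 1))
		return 1;				/* should match */
	return validate_probe(2, NUMA_NO_NODE) ? 0 : 1;	/* rejected */
}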
index 67ec61e51185b3e402da164d0c15b4b464d53ec6..d96ee21b9b77815f8ed018a78a36d43db7fc3c95 100644 (file)
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
        uint32_t msix_num_entries = hw_data->num_banks + 1;
 
        entries = kzalloc_node(msix_num_entries * sizeof(*entries),
-                              GFP_KERNEL, accel_dev->numa_node);
+                              GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
        if (!entries)
                return -ENOMEM;
 
index 123f578d6dd3b82d37887182fc092937c40e59d4..4cfaaa5a49bec6655b0e6588bae04503a82f17bc 100644 (file)
@@ -1107,52 +1107,14 @@ bool edma_filter_fn(struct dma_chan *chan, void *param)
 }
 EXPORT_SYMBOL(edma_filter_fn);
 
-static struct platform_device *pdev0, *pdev1;
-
-static const struct platform_device_info edma_dev_info0 = {
-       .name = "edma-dma-engine",
-       .id = 0,
-       .dma_mask = DMA_BIT_MASK(32),
-};
-
-static const struct platform_device_info edma_dev_info1 = {
-       .name = "edma-dma-engine",
-       .id = 1,
-       .dma_mask = DMA_BIT_MASK(32),
-};
-
 static int edma_init(void)
 {
-       int ret = platform_driver_register(&edma_driver);
-
-       if (ret == 0) {
-               pdev0 = platform_device_register_full(&edma_dev_info0);
-               if (IS_ERR(pdev0)) {
-                       platform_driver_unregister(&edma_driver);
-                       ret = PTR_ERR(pdev0);
-                       goto out;
-               }
-       }
-
-       if (!of_have_populated_dt() && EDMA_CTLRS == 2) {
-               pdev1 = platform_device_register_full(&edma_dev_info1);
-               if (IS_ERR(pdev1)) {
-                       platform_driver_unregister(&edma_driver);
-                       platform_device_unregister(pdev0);
-                       ret = PTR_ERR(pdev1);
-               }
-       }
-
-out:
-       return ret;
+       return platform_driver_register(&edma_driver);
 }
 subsys_initcall(edma_init);
 
 static void __exit edma_exit(void)
 {
-       platform_device_unregister(pdev0);
-       if (pdev1)
-               platform_device_unregister(pdev1);
        platform_driver_unregister(&edma_driver);
 }
 module_exit(edma_exit);
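
With the board-file device creation removed, edma is left with bare platform_driver_register()/unregister() calls (kept in a subsys_initcall for probe ordering). For drivers without that ordering constraint, the same boilerplate collapses into module_platform_driver(); a hypothetical minimal example:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed from device tree\n");
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name = "demo-dma-engine",	/* hypothetical name */
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
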
index 4839bfa74a107a1ad4cbdfb09017d86da1fd1a41..19a99743cf524670d5906400d286cc877bcf91d6 100644 (file)
@@ -271,7 +271,7 @@ struct pl330_config {
 #define DMAC_MODE_NS   (1 << 0)
        unsigned int    mode;
        unsigned int    data_bus_width:10; /* In number of bits */
-       unsigned int    data_buf_dep:10;
+       unsigned int    data_buf_dep:11;
        unsigned int    num_chan:4;
        unsigned int    num_peri:6;
        u32             peri_ns;
@@ -2336,7 +2336,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
        int burst_len;
 
        burst_len = pl330->pcfg.data_bus_width / 8;
-       burst_len *= pl330->pcfg.data_buf_dep;
+       burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
        burst_len >>= desc->rqcfg.brst_size;
 
        /* src/dst_burst_len can't be more than 16 */
@@ -2459,16 +2459,25 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
        /* Select max possible burst size */
        burst = pl330->pcfg.data_bus_width / 8;
 
-       while (burst > 1) {
-               if (!(len % burst))
-                       break;
+       /*
+        * Make sure we use a burst size that aligns with all the memcpy
+        * parameters because our DMA programming algorithm doesn't cope with
+        * transfers which straddle an entry in the DMA device's MFIFO.
+        */
+       while ((src | dst | len) & (burst - 1))
                burst /= 2;
-       }
 
        desc->rqcfg.brst_size = 0;
        while (burst != (1 << desc->rqcfg.brst_size))
                desc->rqcfg.brst_size++;
 
+       /*
+        * If burst size is smaller than bus width then make sure we only
+        * transfer one at a time to avoid a burst straddling an MFIFO entry.
+        */
+       if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+               desc->rqcfg.brst_len = 1;
+
        desc->rqcfg.brst_len = get_burst_len(desc, len);
 
        desc->txd.flags = flags;
@@ -2732,7 +2741,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 
        dev_info(&adev->dev,
-               "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+               "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
        dev_info(&adev->dev,
                "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
                pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
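
The alignment loop above is the core of the fix: OR-ing src, dst and len and testing against burst - 1 halves the burst until it evenly divides all three, so no burst straddles an MFIFO entry. A standalone userspace demo of the same arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Shrink the burst until it aligns with source address, destination
 * address and length simultaneously, exactly as in the hunk above. */
static unsigned int pick_burst(uint64_t src, uint64_t dst, uint64_t len,
			       unsigned int bus_width_bytes)
{
	unsigned int burst = bus_width_bytes;

	while ((src | dst | len) & (burst - 1))
		burst /= 2;
	return burst;
}

int main(void)
{
	/* 8-byte bus, 100 bytes from 0x1004 to 0x2002: the largest burst
	 * dividing all three values is 2 bytes. */
	printf("burst = %u\n", pick_burst(0x1004, 0x2002, 100, 8));
	return 0;
}
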
index 3aa10b32825491dce9d5c0ad244ddc53f78df12e..91292f5513ff2df6d051b784fcb81d276f6e6c63 100644 (file)
@@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
                readl(pchan->base + DMA_CHAN_CUR_PARA));
 }
 
-static inline int convert_burst(u32 maxburst, u8 *burst)
+static inline s8 convert_burst(u32 maxburst)
 {
        switch (maxburst) {
        case 1:
-               *burst = 0;
-               break;
+               return 0;
        case 8:
-               *burst = 2;
-               break;
+               return 2;
        default:
                return -EINVAL;
        }
-
-       return 0;
 }
 
-static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width)
+static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
 {
        if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
            (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
                return -EINVAL;
 
-       *width = addr_width >> 1;
-       return 0;
+       return addr_width >> 1;
 }
 
 static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
@@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
                                    struct dma_slave_config *config)
 {
        u8 src_width, dst_width, src_burst, dst_burst;
-       int ret;
 
        if (!config)
                return -EINVAL;
 
-       ret = convert_burst(config->src_maxburst, &src_burst);
-       if (ret)
-               return ret;
+       src_burst = convert_burst(config->src_maxburst);
+       if (src_burst)
+               return src_burst;
 
-       ret = convert_burst(config->dst_maxburst, &dst_burst);
-       if (ret)
-               return ret;
+       dst_burst = convert_burst(config->dst_maxburst);
+       if (dst_burst)
+               return dst_burst;
 
-       ret = convert_buswidth(config->src_addr_width, &src_width);
-       if (ret)
-               return ret;
+       src_width = convert_buswidth(config->src_addr_width);
+       if (src_width)
+               return src_width;
 
-       ret = convert_buswidth(config->dst_addr_width, &dst_width);
-       if (ret)
-               return ret;
+       dst_width = convert_buswidth(config->dst_addr_width);
+       if (dst_width)
+               return dst_width;
 
        lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
                DMA_CHAN_CFG_SRC_WIDTH(src_width) |
@@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 {
        struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
        struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
-       struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun6i_dma_lli *v_lli;
        struct sun6i_desc *txd;
        dma_addr_t p_lli;
-       int ret;
+       s8 burst, width;
 
        dev_dbg(chan2dev(chan),
                "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
@@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
                goto err_txd_free;
        }
 
-       ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig);
-       if (ret)
-               goto err_dma_free;
+       v_lli->src = src;
+       v_lli->dst = dest;
+       v_lli->len = len;
+       v_lli->para = NORMAL_WAIT;
 
+       burst = convert_burst(8);
+       width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
        v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
                DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
                DMA_CHAN_CFG_DST_LINEAR_MODE |
-               DMA_CHAN_CFG_SRC_LINEAR_MODE;
+               DMA_CHAN_CFG_SRC_LINEAR_MODE |
+               DMA_CHAN_CFG_SRC_BURST(burst) |
+               DMA_CHAN_CFG_SRC_WIDTH(width) |
+               DMA_CHAN_CFG_DST_BURST(burst) |
+               DMA_CHAN_CFG_DST_WIDTH(width);
 
        sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
 
@@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 
        return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
 
-err_dma_free:
-       dma_pool_free(sdev->pool, v_lli, p_lli);
 err_txd_free:
        kfree(txd);
        return NULL;
@@ -915,6 +913,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
        sdc->slave.device_prep_dma_memcpy       = sun6i_dma_prep_dma_memcpy;
        sdc->slave.device_control               = sun6i_dma_control;
        sdc->slave.chancnt                      = NR_MAX_VCHANS;
+       sdc->slave.copy_align                   = 4;
 
        sdc->slave.dev = &pdev->dev;
 
index 5d997a33907e431b895111fc27039468be244614..2a3973a7c44179457f635196696bc78e756a76e1 100644 (file)
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;
 
-       if (_IOC_DIR(cmd) == _IOC_READ)
-               memset(&buffer, 0, _IOC_SIZE(cmd));
+       memset(&buffer, 0, sizeof(buffer));
 
        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
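
Zeroing the whole stack buffer, rather than only for _IOC_READ commands, guarantees no uninitialized bytes can leak back to userspace through copy_to_user(). The _IOC_* decoding macros used above are also visible to userspace; a small demo with a hypothetical ioctl number:

#include <stdio.h>
#include <linux/ioctl.h>

#define DEMO_IOC _IOWR('#', 0x01, int)	/* hypothetical command */

int main(void)
{
	printf("dir=%u (read=%u write=%u) size=%u nr=%u\n",
	       _IOC_DIR(DEMO_IOC),
	       !!(_IOC_DIR(DEMO_IOC) & _IOC_READ),
	       !!(_IOC_DIR(DEMO_IOC) & _IOC_WRITE),
	       _IOC_SIZE(DEMO_IOC), _IOC_NR(DEMO_IOC));
	return 0;
}
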
index 3f1624b5fcde12dac97c8d5bae0a6247f83733ae..c3413b6adb17caec6ac0f273bfa80ef8bacca960 100644 (file)
@@ -202,3 +202,7 @@ source "drivers/gpu/drm/tegra/Kconfig"
 source "drivers/gpu/drm/panel/Kconfig"
 
 source "drivers/gpu/drm/sti/Kconfig"
+
+source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+
+source "drivers/gpu/drm/imx/Kconfig"
index 14bf29e76693ae205aab3afa6496b6f015676ac5..66e40398b3d32220624cb7fad671c86058c84339 100644 (file)
@@ -14,7 +14,7 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_prime.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
-               drm_modeset_lock.o
+               drm_modeset_lock.o drm_atomic.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
-               drm_plane_helper.o drm_dp_mst_topology.o
+               drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -63,6 +63,8 @@ obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_IMX) += imx/
 obj-y                  += i2c/
 obj-y                  += panel/
 obj-y                  += bridge/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
deleted file mode 100644 (file)
index b5b3327..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-************************************************************
-* For the very latest on DRI development, please see:      *
-*     http://dri.freedesktop.org/                          *
-************************************************************
-
-The Direct Rendering Manager (drm) is a device-independent kernel-level
-device driver that provides support for the XFree86 Direct Rendering
-Infrastructure (DRI).
-
-The DRM supports the Direct Rendering Infrastructure (DRI) in four major
-ways:
-
-    1. The DRM provides synchronized access to the graphics hardware via
-       the use of an optimized two-tiered lock.
-
-    2. The DRM enforces the DRI security policy for access to the graphics
-       hardware by only allowing authenticated X11 clients access to
-       restricted regions of memory.
-
-    3. The DRM provides a generic DMA engine, complete with multiple
-       queues and the ability to detect the need for an OpenGL context
-       switch.
-
-    4. The DRM is extensible via the use of small device-specific modules
-       that rely extensively on the API exported by the DRM module.
-
-
-Documentation on the DRI is available from:
-    http://dri.freedesktop.org/wiki/Documentation
-    http://sourceforge.net/project/showfiles.php?group_id=387
-    http://dri.sourceforge.net/doc/
-
-For specific information about kernel-level support, see:
-
-    The Direct Rendering Manager, Kernel Support for the Direct Rendering
-    Infrastructure
-    http://dri.sourceforge.net/doc/drm_low_level.html
-
-    Hardware Locking for the Direct Rendering Infrastructure
-    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
-
-    A Security Analysis of the Direct Rendering Infrastructure
-    http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
new file mode 100644 (file)
index 0000000..8dfac37
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Heterogeneous system architecture configuration
+#
+
+config HSA_AMD
+       tristate "HSA kernel driver for AMD GPU devices"
+       depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+       help
+         Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
new file mode 100644 (file)
index 0000000..be6246d
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for Heterogeneous System Architecture support for AMD GPU devices
+#
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+
+amdkfd-y       := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+               kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+               kfd_process.o kfd_queue.o kfd_mqd_manager.o \
+               kfd_kernel_queue.o kfd_packet_manager.o \
+               kfd_process_queue_manager.o kfd_device_queue_manager.o \
+               kfd_interrupt.o
+
+obj-$(CONFIG_HSA_AMD)  += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
new file mode 100644 (file)
index 0000000..607fc5c
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CIK_REGS_H
+#define CIK_REGS_H
+
+#define IH_VMID_0_LUT                                  0x3D40u
+
+#define BIF_DOORBELL_CNTL                              0x530Cu
+
+#define        SRBM_GFX_CNTL                                   0xE44
+#define        PIPEID(x)                                       ((x) << 0)
+#define        MEID(x)                                         ((x) << 2)
+#define        VMID(x)                                         ((x) << 4)
+#define        QUEUEID(x)                                      ((x) << 8)
+
+#define        SQ_CONFIG                                       0x8C00
+
+#define        SH_MEM_BASES                                    0x8C28
+/* if PTR32, these are the bases for scratch and lds */
+#define        PRIVATE_BASE(x)                                 ((x) << 0) /* scratch */
+#define        SHARED_BASE(x)                                  ((x) << 16) /* LDS */
+#define        SH_MEM_APE1_BASE                                0x8C2C
+/* if PTR32, this is the base location of GPUVM */
+#define        SH_MEM_APE1_LIMIT                               0x8C30
+/* if PTR32, this is the upper limit of GPUVM */
+#define        SH_MEM_CONFIG                                   0x8C34
+#define        PTR32                                           (1 << 0)
+#define PRIVATE_ATC                                    (1 << 1)
+#define        ALIGNMENT_MODE(x)                               ((x) << 2)
+#define        SH_MEM_ALIGNMENT_MODE_DWORD                     0
+#define        SH_MEM_ALIGNMENT_MODE_DWORD_STRICT              1
+#define        SH_MEM_ALIGNMENT_MODE_STRICT                    2
+#define        SH_MEM_ALIGNMENT_MODE_UNALIGNED                 3
+#define        DEFAULT_MTYPE(x)                                ((x) << 4)
+#define        APE1_MTYPE(x)                                   ((x) << 7)
+
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define        MTYPE_CACHED                                    0
+#define        MTYPE_NONCACHED                                 3
+
+
+#define SH_STATIC_MEM_CONFIG                           0x9604u
+
+#define        TC_CFG_L1_LOAD_POLICY0                          0xAC68
+#define        TC_CFG_L1_LOAD_POLICY1                          0xAC6C
+#define        TC_CFG_L1_STORE_POLICY                          0xAC70
+#define        TC_CFG_L2_LOAD_POLICY0                          0xAC74
+#define        TC_CFG_L2_LOAD_POLICY1                          0xAC78
+#define        TC_CFG_L2_STORE_POLICY0                         0xAC7C
+#define        TC_CFG_L2_STORE_POLICY1                         0xAC80
+#define        TC_CFG_L2_ATOMIC_POLICY                         0xAC84
+#define        TC_CFG_L1_VOLATILE                              0xAC88
+#define        TC_CFG_L2_VOLATILE                              0xAC8C
+
+#define CP_PQ_WPTR_POLL_CNTL                           0xC20C
+#define        WPTR_POLL_EN                                    (1 << 31)
+
+#define CPC_INT_CNTL                                   0xC2D0
+#define CP_ME1_PIPE0_INT_CNTL                          0xC214
+#define CP_ME1_PIPE1_INT_CNTL                          0xC218
+#define CP_ME1_PIPE2_INT_CNTL                          0xC21C
+#define CP_ME1_PIPE3_INT_CNTL                          0xC220
+#define CP_ME2_PIPE0_INT_CNTL                          0xC224
+#define CP_ME2_PIPE1_INT_CNTL                          0xC228
+#define CP_ME2_PIPE2_INT_CNTL                          0xC22C
+#define CP_ME2_PIPE3_INT_CNTL                          0xC230
+#define DEQUEUE_REQUEST_INT_ENABLE                     (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_ENABLE                    (1 << 17)
+#define PRIV_REG_INT_ENABLE                            (1 << 23)
+#define TIME_STAMP_INT_ENABLE                          (1 << 26)
+#define GENERIC2_INT_ENABLE                            (1 << 29)
+#define GENERIC1_INT_ENABLE                            (1 << 30)
+#define GENERIC0_INT_ENABLE                            (1 << 31)
+#define CP_ME1_PIPE0_INT_STATUS                                0xC214
+#define CP_ME1_PIPE1_INT_STATUS                                0xC218
+#define CP_ME1_PIPE2_INT_STATUS                                0xC21C
+#define CP_ME1_PIPE3_INT_STATUS                                0xC220
+#define CP_ME2_PIPE0_INT_STATUS                                0xC224
+#define CP_ME2_PIPE1_INT_STATUS                                0xC228
+#define CP_ME2_PIPE2_INT_STATUS                                0xC22C
+#define CP_ME2_PIPE3_INT_STATUS                                0xC230
+#define DEQUEUE_REQUEST_INT_STATUS                     (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_STATUS                    (1 << 17)
+#define PRIV_REG_INT_STATUS                            (1 << 23)
+#define TIME_STAMP_INT_STATUS                          (1 << 26)
+#define GENERIC2_INT_STATUS                            (1 << 29)
+#define GENERIC1_INT_STATUS                            (1 << 30)
+#define GENERIC0_INT_STATUS                            (1 << 31)
+
+#define CP_HPD_EOP_BASE_ADDR                           0xC904
+#define CP_HPD_EOP_BASE_ADDR_HI                                0xC908
+#define CP_HPD_EOP_VMID                                        0xC90C
+#define CP_HPD_EOP_CONTROL                             0xC910
+#define        EOP_SIZE(x)                                     ((x) << 0)
+#define        EOP_SIZE_MASK                                   (0x3f << 0)
+#define CP_MQD_BASE_ADDR                               0xC914
+#define CP_MQD_BASE_ADDR_HI                            0xC918
+#define CP_HQD_ACTIVE                                  0xC91C
+#define CP_HQD_VMID                                    0xC920
+
+#define CP_HQD_PERSISTENT_STATE                                0xC924u
+#define        DEFAULT_CP_HQD_PERSISTENT_STATE                 (0x33U << 8)
+#define        PRELOAD_REQ                                     (1 << 0)
+
+#define CP_HQD_PIPE_PRIORITY                           0xC928u
+#define CP_HQD_QUEUE_PRIORITY                          0xC92Cu
+#define CP_HQD_QUANTUM                                 0xC930u
+#define        QUANTUM_EN                                      1U
+#define        QUANTUM_SCALE_1MS                               (1U << 4)
+#define        QUANTUM_DURATION(x)                             ((x) << 8)
+
+#define CP_HQD_PQ_BASE                                 0xC934
+#define CP_HQD_PQ_BASE_HI                              0xC938
+#define CP_HQD_PQ_RPTR                                 0xC93C
+#define CP_HQD_PQ_RPTR_REPORT_ADDR                     0xC940
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI                  0xC944
+#define CP_HQD_PQ_WPTR_POLL_ADDR                       0xC948
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI                    0xC94C
+#define CP_HQD_PQ_DOORBELL_CONTROL                     0xC950
+#define        DOORBELL_OFFSET(x)                              ((x) << 2)
+#define        DOORBELL_OFFSET_MASK                            (0x1fffff << 2)
+#define        DOORBELL_SOURCE                                 (1 << 28)
+#define        DOORBELL_SCHD_HIT                               (1 << 29)
+#define        DOORBELL_EN                                     (1 << 30)
+#define        DOORBELL_HIT                                    (1 << 31)
+#define CP_HQD_PQ_WPTR                                 0xC954
+#define CP_HQD_PQ_CONTROL                              0xC958
+#define        QUEUE_SIZE(x)                                   ((x) << 0)
+#define        QUEUE_SIZE_MASK                                 (0x3f << 0)
+#define        RPTR_BLOCK_SIZE(x)                              ((x) << 8)
+#define        RPTR_BLOCK_SIZE_MASK                            (0x3f << 8)
+#define        MIN_AVAIL_SIZE(x)                               ((x) << 20)
+#define        PQ_ATC_EN                                       (1 << 23)
+#define        PQ_VOLATILE                                     (1 << 26)
+#define        NO_UPDATE_RPTR                                  (1 << 27)
+#define        UNORD_DISPATCH                                  (1 << 28)
+#define        ROQ_PQ_IB_FLIP                                  (1 << 29)
+#define        PRIV_STATE                                      (1 << 30)
+#define        KMD_QUEUE                                       (1 << 31)
+
+#define        DEFAULT_RPTR_BLOCK_SIZE                         RPTR_BLOCK_SIZE(5)
+#define        DEFAULT_MIN_AVAIL_SIZE                          MIN_AVAIL_SIZE(3)
+
+#define CP_HQD_IB_BASE_ADDR                            0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI                         0xC960u
+#define CP_HQD_IB_RPTR                                 0xC964u
+#define CP_HQD_IB_CONTROL                              0xC968u
+#define        IB_ATC_EN                                       (1U << 23)
+#define        DEFAULT_MIN_IB_AVAIL_SIZE                       (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST                         0xC974
+#define        DEQUEUE_REQUEST_DRAIN                           1
+#define DEQUEUE_REQUEST_RESET                          2
+#define                DEQUEUE_INT                                     (1U << 8)
+
+#define CP_HQD_SEMA_CMD                                        0xC97Cu
+#define CP_HQD_MSG_TYPE                                        0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO                                0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI                                0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO                                0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI                                0xC990u
+#define CP_HQD_HQ_SCHEDULER0                           0xC994u
+#define CP_HQD_HQ_SCHEDULER1                           0xC998u
+
+
+#define CP_MQD_CONTROL                                 0xC99C
+#define        MQD_VMID(x)                                     ((x) << 0)
+#define        MQD_VMID_MASK                                   (0xf << 0)
+#define        MQD_CONTROL_PRIV_STATE_EN                       (1U << 8)
+
+#define GRBM_GFX_INDEX                                 0x30800
+#define        INSTANCE_INDEX(x)                               ((x) << 0)
+#define        SH_INDEX(x)                                     ((x) << 8)
+#define        SE_INDEX(x)                                     ((x) << 16)
+#define        SH_BROADCAST_WRITES                             (1 << 29)
+#define        INSTANCE_BROADCAST_WRITES                       (1 << 30)
+#define        SE_BROADCAST_WRITES                             (1 << 31)
+
+#define SQC_CACHES                                     0x30d20
+#define SQC_POLICY                                     0x8C38u
+#define SQC_VOLATILE                                   0x8C3Cu
+
+#define CP_PERFMON_CNTL                                        0x36020
+
+#define ATC_VMID0_PASID_MAPPING                                0x339Cu
+#define        ATC_VMID_PASID_MAPPING_UPDATE_STATUS            0x3398u
+#define        ATC_VMID_PASID_MAPPING_VALID                    (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL                          0x3310u
+#define        ATS_ACCESS_MODE_NEVER                           0
+#define        ATS_ACCESS_MODE_ALWAYS                          1
+
+#define ATC_VM_APERTURE0_CNTL2                         0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR                     0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR                      0x3300u
+#define ATC_VM_APERTURE1_CNTL                          0x3314u
+#define ATC_VM_APERTURE1_CNTL2                         0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR                     0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR                      0x3304u
+
+#endif
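
These are plain shift/mask building blocks: a register value is composed by OR-ing the field macros together. A self-contained illustration using local copies of a few definitions from the header above (field semantics are only what the header itself states; the values are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define QUEUE_SIZE(x)		((x) << 0)
#define RPTR_BLOCK_SIZE(x)	((x) << 8)
#define MIN_AVAIL_SIZE(x)	((x) << 20)
#define PQ_ATC_EN		(1 << 23)

int main(void)
{
	/* Hypothetical queue setup: order-10 queue, default block sizes. */
	uint32_t pq_control = QUEUE_SIZE(10) | RPTR_BLOCK_SIZE(5) |
			      MIN_AVAIL_SIZE(3) | PQ_ATC_EN;

	printf("CP_HQD_PQ_CONTROL = 0x%08x\n", pq_control);
	return 0;
}
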
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
new file mode 100644 (file)
index 0000000..4f7b275
--- /dev/null
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+static int kfd_open(struct inode *, struct file *);
+static int kfd_mmap(struct file *, struct vm_area_struct *);
+
+static const char kfd_dev_name[] = "kfd";
+
+static const struct file_operations kfd_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = kfd_ioctl,
+       .compat_ioctl = kfd_ioctl,
+       .open = kfd_open,
+       .mmap = kfd_mmap,
+};
+
+static int kfd_char_dev_major = -1;
+static struct class *kfd_class;
+struct device *kfd_device;
+
+int kfd_chardev_init(void)
+{
+       int err = 0;
+
+       kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
+       err = kfd_char_dev_major;
+       if (err < 0)
+               goto err_register_chrdev;
+
+       kfd_class = class_create(THIS_MODULE, kfd_dev_name);
+       err = PTR_ERR(kfd_class);
+       if (IS_ERR(kfd_class))
+               goto err_class_create;
+
+       kfd_device = device_create(kfd_class, NULL,
+                                       MKDEV(kfd_char_dev_major, 0),
+                                       NULL, kfd_dev_name);
+       err = PTR_ERR(kfd_device);
+       if (IS_ERR(kfd_device))
+               goto err_device_create;
+
+       return 0;
+
+err_device_create:
+       class_destroy(kfd_class);
+err_class_create:
+       unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+err_register_chrdev:
+       return err;
+}
+
+void kfd_chardev_exit(void)
+{
+       device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
+       class_destroy(kfd_class);
+       unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+}
+
+struct device *kfd_chardev(void)
+{
+       return kfd_device;
+}
+
+
+static int kfd_open(struct inode *inode, struct file *filep)
+{
+       struct kfd_process *process;
+       bool is_32bit_user_mode;
+
+       if (iminor(inode) != 0)
+               return -ENODEV;
+
+       is_32bit_user_mode = is_compat_task();
+
+       if (is_32bit_user_mode == true) {
+               dev_warn(kfd_device,
+                       "Process %d (32-bit) failed to open /dev/kfd\n"
+                       "32-bit processes are not supported by amdkfd\n",
+                       current->pid);
+               return -EPERM;
+       }
+
+       process = kfd_create_process(current);
+       if (IS_ERR(process))
+               return PTR_ERR(process);
+
+       process->is_32bit_user_mode = is_32bit_user_mode;
+
+       dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+               process->pasid, process->is_32bit_user_mode);
+
+       kfd_init_apertures(process);
+
+       return 0;
+}
+
+static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+                                       void __user *arg)
+{
+       struct kfd_ioctl_get_version_args args;
+       int err = 0;
+
+       args.major_version = KFD_IOCTL_MAJOR_VERSION;
+       args.minor_version = KFD_IOCTL_MINOR_VERSION;
+
+       if (copy_to_user(arg, &args, sizeof(args)))
+               err = -EFAULT;
+
+       return err;
+}
+
+static int set_queue_properties_from_user(struct queue_properties *q_properties,
+                               struct kfd_ioctl_create_queue_args *args)
+{
+       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+               pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+               return -EINVAL;
+       }
+
+       if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+               pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+               return -EINVAL;
+       }
+
+       if ((args->ring_base_address) &&
+               (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->ring_base_address,
+                       sizeof(uint64_t)))) {
+               pr_err("kfd: can't access ring base address\n");
+               return -EFAULT;
+       }
+
+       if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
+               pr_err("kfd: ring size must be a power of 2 or 0\n");
+               return -EINVAL;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->read_pointer_address,
+                       sizeof(uint32_t))) {
+               pr_err("kfd: can't access read pointer\n");
+               return -EFAULT;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->write_pointer_address,
+                       sizeof(uint32_t))) {
+               pr_err("kfd: can't access write pointer\n");
+               return -EFAULT;
+       }
+
+       q_properties->is_interop = false;
+       q_properties->queue_percent = args->queue_percentage;
+       q_properties->priority = args->queue_priority;
+       q_properties->queue_address = args->ring_base_address;
+       q_properties->queue_size = args->ring_size;
+       q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
+       q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+       if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
+               args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+               q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+       else
+               return -ENOTSUPP;
+
+       if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+               q_properties->format = KFD_QUEUE_FORMAT_AQL;
+       else
+               q_properties->format = KFD_QUEUE_FORMAT_PM4;
+
+       pr_debug("Queue Percentage (%d, %d)\n",
+                       q_properties->queue_percent, args->queue_percentage);
+
+       pr_debug("Queue Priority (%d, %d)\n",
+                       q_properties->priority, args->queue_priority);
+
+       pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+                       q_properties->queue_address, args->ring_base_address);
+
+       pr_debug("Queue Size (0x%llX, %u)\n",
+                       q_properties->queue_size, args->ring_size);
+
+       pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
+                       (uint64_t) q_properties->read_ptr,
+                       (uint64_t) q_properties->write_ptr);
+
+       pr_debug("Queue Format (%d)\n", q_properties->format);
+
+       return 0;
+}
+
+static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+                                       void __user *arg)
+{
+       struct kfd_ioctl_create_queue_args args;
+       struct kfd_dev *dev;
+       int err = 0;
+       unsigned int queue_id;
+       struct kfd_process_device *pdd;
+       struct queue_properties q_properties;
+
+       memset(&q_properties, 0, sizeof(struct queue_properties));
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       pr_debug("kfd: creating queue ioctl\n");
+
+       err = set_queue_properties_from_user(&q_properties, &args);
+       if (err)
+               return err;
+
+       dev = kfd_device_by_id(args.gpu_id);
+       if (dev == NULL)
+               return -EINVAL;
+
+       mutex_lock(&p->mutex);
+
+       pdd = kfd_bind_process_to_device(dev, p);
+       if (IS_ERR(pdd)) {
+               err = PTR_ERR(pdd);
+               goto err_bind_process;
+       }
+
+       pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+                       p->pasid,
+                       dev->id);
+
+       err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
+                               KFD_QUEUE_TYPE_COMPUTE, &queue_id);
+       if (err != 0)
+               goto err_create_queue;
+
+       args.queue_id = queue_id;
+
+       /* Return gpu_id as doorbell offset for mmap usage */
+       args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
+
+       if (copy_to_user(arg, &args, sizeof(args))) {
+               err = -EFAULT;
+               goto err_copy_args_out;
+       }
+
+       mutex_unlock(&p->mutex);
+
+       pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+
+       pr_debug("ring buffer address == 0x%016llX\n",
+                       args.ring_base_address);
+
+       pr_debug("read ptr address    == 0x%016llX\n",
+                       args.read_pointer_address);
+
+       pr_debug("write ptr address   == 0x%016llX\n",
+                       args.write_pointer_address);
+
+       return 0;
+
+err_copy_args_out:
+       pqm_destroy_queue(&p->pqm, queue_id);
+err_create_queue:
+err_bind_process:
+       mutex_unlock(&p->mutex);
+       return err;
+}
+
+static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
+                                       void __user *arg)
+{
+       int retval;
+       struct kfd_ioctl_destroy_queue_args args;
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       pr_debug("kfd: destroying queue id %d for PASID %d\n",
+                               args.queue_id,
+                               p->pasid);
+
+       mutex_lock(&p->mutex);
+
+       retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+
+       mutex_unlock(&p->mutex);
+       return retval;
+}
+
+static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
+                                       void __user *arg)
+{
+       int retval;
+       struct kfd_ioctl_update_queue_args args;
+       struct queue_properties properties;
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+               pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+               return -EINVAL;
+       }
+
+       if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+               pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+               return -EINVAL;
+       }
+
+       if ((args.ring_base_address) &&
+               (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args.ring_base_address,
+                       sizeof(uint64_t)))) {
+               pr_err("kfd: can't access ring base address\n");
+               return -EFAULT;
+       }
+
+       if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+               pr_err("kfd: ring size must be a power of 2 or 0\n");
+               return -EINVAL;
+       }
+
+       properties.queue_address = args.ring_base_address;
+       properties.queue_size = args.ring_size;
+       properties.queue_percent = args.queue_percentage;
+       properties.priority = args.queue_priority;
+
+       pr_debug("kfd: updating queue id %d for PASID %d\n",
+                       args.queue_id, p->pasid);
+
+       mutex_lock(&p->mutex);
+
+       retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+
+       mutex_unlock(&p->mutex);
+
+       return retval;
+}
+
+static long kfd_ioctl_set_memory_policy(struct file *filep,
+                               struct kfd_process *p, void __user *arg)
+{
+       struct kfd_ioctl_set_memory_policy_args args;
+       struct kfd_dev *dev;
+       int err = 0;
+       struct kfd_process_device *pdd;
+       enum cache_policy default_policy, alternate_policy;
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+               return -EINVAL;
+       }
+
+       if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+               return -EINVAL;
+       }
+
+       dev = kfd_device_by_id(args.gpu_id);
+       if (dev == NULL)
+               return -EINVAL;
+
+       mutex_lock(&p->mutex);
+
+       pdd = kfd_bind_process_to_device(dev, p);
+       if (IS_ERR(pdd)) {
+               err = PTR_ERR(pdd);
+               goto out;
+       }
+
+       default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+                        ? cache_policy_coherent : cache_policy_noncoherent;
+
+       alternate_policy =
+               (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+                  ? cache_policy_coherent : cache_policy_noncoherent;
+
+       if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+                               &pdd->qpd,
+                               default_policy,
+                               alternate_policy,
+                               (void __user *)args.alternate_aperture_base,
+                               args.alternate_aperture_size))
+               err = -EINVAL;
+
+out:
+       mutex_unlock(&p->mutex);
+
+       return err;
+}
+
+static long kfd_ioctl_get_clock_counters(struct file *filep,
+                               struct kfd_process *p, void __user *arg)
+{
+       struct kfd_ioctl_get_clock_counters_args args;
+       struct kfd_dev *dev;
+       struct timespec time;
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       dev = kfd_device_by_id(args.gpu_id);
+       if (dev == NULL)
+               return -EINVAL;
+
+       /* Reading GPU clock counter from KGD */
+       args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+
+       /* No access to rdtsc. Using raw monotonic time */
+       getrawmonotonic(&time);
+       args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+       get_monotonic_boottime(&time);
+       args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+       /* Since the counter is in nano-seconds we use 1GHz frequency */
+       args.system_clock_freq = 1000000000;
+
+       if (copy_to_user(arg, &args, sizeof(args)))
+               return -EFAULT;
+
+       return 0;
+}
+
+
+static int kfd_ioctl_get_process_apertures(struct file *filp,
+                               struct kfd_process *p, void __user *arg)
+{
+       struct kfd_ioctl_get_process_apertures_args args;
+       struct kfd_process_device_apertures *pAperture;
+       struct kfd_process_device *pdd;
+
+       dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+
+       if (copy_from_user(&args, arg, sizeof(args)))
+               return -EFAULT;
+
+       args.num_of_nodes = 0;
+
+       mutex_lock(&p->mutex);
+
+       /* if the process-device list isn't empty */
+       if (kfd_has_process_device_data(p)) {
+               /* Run over all pdd of the process */
+               pdd = kfd_get_first_process_device_data(p);
+               do {
+                       pAperture = &args.process_apertures[args.num_of_nodes];
+                       pAperture->gpu_id = pdd->dev->id;
+                       pAperture->lds_base = pdd->lds_base;
+                       pAperture->lds_limit = pdd->lds_limit;
+                       pAperture->gpuvm_base = pdd->gpuvm_base;
+                       pAperture->gpuvm_limit = pdd->gpuvm_limit;
+                       pAperture->scratch_base = pdd->scratch_base;
+                       pAperture->scratch_limit = pdd->scratch_limit;
+
+                       dev_dbg(kfd_device,
+                               "node id %u\n", args.num_of_nodes);
+                       dev_dbg(kfd_device,
+                               "gpu id %u\n", pdd->dev->id);
+                       dev_dbg(kfd_device,
+                               "lds_base %llX\n", pdd->lds_base);
+                       dev_dbg(kfd_device,
+                               "lds_limit %llX\n", pdd->lds_limit);
+                       dev_dbg(kfd_device,
+                               "gpuvm_base %llX\n", pdd->gpuvm_base);
+                       dev_dbg(kfd_device,
+                               "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+                       dev_dbg(kfd_device,
+                               "scratch_base %llX\n", pdd->scratch_base);
+                       dev_dbg(kfd_device,
+                               "scratch_limit %llX\n", pdd->scratch_limit);
+
+                       args.num_of_nodes++;
+               } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+                               (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+       }
+
+       mutex_unlock(&p->mutex);
+
+       if (copy_to_user(arg, &args, sizeof(args)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+       struct kfd_process *process;
+       long err = -EINVAL;
+
+       dev_dbg(kfd_device,
+               "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
+               cmd, _IOC_NR(cmd), arg);
+
+       process = kfd_get_process(current);
+       if (IS_ERR(process))
+               return PTR_ERR(process);
+
+       switch (cmd) {
+       case KFD_IOC_GET_VERSION:
+               err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
+               break;
+       case KFD_IOC_CREATE_QUEUE:
+               err = kfd_ioctl_create_queue(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       case KFD_IOC_DESTROY_QUEUE:
+               err = kfd_ioctl_destroy_queue(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       case KFD_IOC_SET_MEMORY_POLICY:
+               err = kfd_ioctl_set_memory_policy(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       case KFD_IOC_GET_CLOCK_COUNTERS:
+               err = kfd_ioctl_get_clock_counters(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       case KFD_IOC_GET_PROCESS_APERTURES:
+               err = kfd_ioctl_get_process_apertures(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       case KFD_IOC_UPDATE_QUEUE:
+               err = kfd_ioctl_update_queue(filep, process,
+                                               (void __user *)arg);
+               break;
+
+       default:
+               dev_err(kfd_device,
+                       "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
+                       cmd, arg);
+               err = -EINVAL;
+               break;
+       }
+
+       if (err < 0)
+               dev_err(kfd_device,
+                       "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
+                       err, cmd, _IOC_NR(cmd));
+
+       return err;
+}
+
+static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct kfd_process *process;
+
+       process = kfd_get_process(current);
+       if (IS_ERR(process))
+               return PTR_ERR(process);
+
+       return kfd_doorbell_mmap(process, vma);
+}
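
From userspace, the character device registered above is driven through these ioctls. A hedged sketch of querying the interface version; KFD_IOC_GET_VERSION and the args struct come from the dispatch table above, while the header install path <linux/kfd_ioctl.h> and the open flags are assumptions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>	/* assumed install path of the UAPI header */

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kfd");
		return 1;
	}
	if (ioctl(fd, KFD_IOC_GET_VERSION, &args) < 0)
		perror("KFD_IOC_GET_VERSION");
	else
		printf("kfd interface %u.%u\n",
		       args.major_version, args.minor_version);
	close(fd);
	return 0;
}
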
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
new file mode 100644 (file)
index 0000000..a374fa3
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_CRAT_H_INCLUDED
+#define KFD_CRAT_H_INCLUDED
+
+#include <linux/types.h>
+
+#pragma pack(1)
+
+/*
+ * 4CC signature values for the CRAT and CDIT ACPI tables
+ */
+
+#define CRAT_SIGNATURE "CRAT"
+#define CDIT_SIGNATURE "CDIT"
+
+/*
+ * Component Resource Association Table (CRAT)
+ */
+
+#define CRAT_OEMID_LENGTH      6
+#define CRAT_OEMTABLEID_LENGTH 8
+#define CRAT_RESERVED_LENGTH   6
+
+#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+
+struct crat_header {
+       uint32_t        signature;
+       uint32_t        length;
+       uint8_t         revision;
+       uint8_t         checksum;
+       uint8_t         oem_id[CRAT_OEMID_LENGTH];
+       uint8_t         oem_table_id[CRAT_OEMTABLEID_LENGTH];
+       uint32_t        oem_revision;
+       uint32_t        creator_id;
+       uint32_t        creator_revision;
+       uint32_t        total_entries;
+       uint16_t        num_domains;
+       uint8_t         reserved[CRAT_RESERVED_LENGTH];
+};
+
+/*
+ * The header structure is immediately followed by total_entries of the
+ * data definitions
+ */
+
+/*
+ * The currently defined subtype entries in the CRAT
+ */
+#define CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY      0
+#define CRAT_SUBTYPE_MEMORY_AFFINITY           1
+#define CRAT_SUBTYPE_CACHE_AFFINITY            2
+#define CRAT_SUBTYPE_TLB_AFFINITY              3
+#define CRAT_SUBTYPE_CCOMPUTE_AFFINITY         4
+#define CRAT_SUBTYPE_IOLINK_AFFINITY           5
+#define CRAT_SUBTYPE_MAX                       6
+
+#define CRAT_SIBLINGMAP_SIZE   32
+
+/*
+ * ComputeUnit Affinity structure and definitions
+ */
+#define CRAT_CU_FLAGS_ENABLED          0x00000001
+#define CRAT_CU_FLAGS_HOT_PLUGGABLE    0x00000002
+#define CRAT_CU_FLAGS_CPU_PRESENT      0x00000004
+#define CRAT_CU_FLAGS_GPU_PRESENT      0x00000008
+#define CRAT_CU_FLAGS_IOMMU_PRESENT    0x00000010
+#define CRAT_CU_FLAGS_RESERVED         0xffffffe0
+
+#define CRAT_COMPUTEUNIT_RESERVED_LENGTH 4
+
+struct crat_subtype_computeunit {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        proximity_domain;
+       uint32_t        processor_id_low;
+       uint16_t        num_cpu_cores;
+       uint16_t        num_simd_cores;
+       uint16_t        max_waves_simd;
+       uint16_t        io_count;
+       uint16_t        hsa_capability;
+       uint16_t        lds_size_in_kb;
+       uint8_t         wave_front_size;
+       uint8_t         num_banks;
+       uint16_t        micro_engine_id;
+       uint8_t         num_arrays;
+       uint8_t         num_cu_per_array;
+       uint8_t         num_simd_per_cu;
+       uint8_t         max_slots_scatch_cu;
+       uint8_t         reserved2[CRAT_COMPUTEUNIT_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Memory Affinity structure and definitions
+ */
+#define CRAT_MEM_FLAGS_ENABLED         0x00000001
+#define CRAT_MEM_FLAGS_HOT_PLUGGABLE   0x00000002
+#define CRAT_MEM_FLAGS_NON_VOLATILE    0x00000004
+#define CRAT_MEM_FLAGS_RESERVED                0xfffffff8
+
+#define CRAT_MEMORY_RESERVED_LENGTH 8
+
+struct crat_subtype_memory {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        promixity_domain;
+       uint32_t        base_addr_low;
+       uint32_t        base_addr_high;
+       uint32_t        length_low;
+       uint32_t        length_high;
+       uint32_t        width;
+       uint8_t         reserved2[CRAT_MEMORY_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Cache Affinity structure and definitions
+ */
+#define CRAT_CACHE_FLAGS_ENABLED       0x00000001
+#define CRAT_CACHE_FLAGS_DATA_CACHE    0x00000002
+#define CRAT_CACHE_FLAGS_INST_CACHE    0x00000004
+#define CRAT_CACHE_FLAGS_CPU_CACHE     0x00000008
+#define CRAT_CACHE_FLAGS_SIMD_CACHE    0x00000010
+#define CRAT_CACHE_FLAGS_RESERVED      0xffffffe0
+
+#define CRAT_CACHE_RESERVED_LENGTH 8
+
+struct crat_subtype_cache {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        processor_id_low;
+       uint8_t         sibling_map[CRAT_SIBLINGMAP_SIZE];
+       uint32_t        cache_size;
+       uint8_t         cache_level;
+       uint8_t         lines_per_tag;
+       uint16_t        cache_line_size;
+       uint8_t         associativity;
+       uint8_t         cache_properties;
+       uint16_t        cache_latency;
+       uint8_t         reserved2[CRAT_CACHE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA TLB Affinity structure and definitions
+ */
+#define CRAT_TLB_FLAGS_ENABLED 0x00000001
+#define CRAT_TLB_FLAGS_DATA_TLB        0x00000002
+#define CRAT_TLB_FLAGS_INST_TLB        0x00000004
+#define CRAT_TLB_FLAGS_CPU_TLB 0x00000008
+#define CRAT_TLB_FLAGS_SIMD_TLB        0x00000010
+#define CRAT_TLB_FLAGS_RESERVED        0xffffffe0
+
+#define CRAT_TLB_RESERVED_LENGTH 4
+
+struct crat_subtype_tlb {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        processor_id_low;
+       uint8_t         sibling_map[CRAT_SIBLINGMAP_SIZE];
+       uint32_t        tlb_level;
+       uint8_t         data_tlb_associativity_2mb;
+       uint8_t         data_tlb_size_2mb;
+       uint8_t         instruction_tlb_associativity_2mb;
+       uint8_t         instruction_tlb_size_2mb;
+       uint8_t         data_tlb_associativity_4k;
+       uint8_t         data_tlb_size_4k;
+       uint8_t         instruction_tlb_associativity_4k;
+       uint8_t         instruction_tlb_size_4k;
+       uint8_t         data_tlb_associativity_1gb;
+       uint8_t         data_tlb_size_1gb;
+       uint8_t         instruction_tlb_associativity_1gb;
+       uint8_t         instruction_tlb_size_1gb;
+       uint8_t         reserved2[CRAT_TLB_RESERVED_LENGTH];
+};
+
+/*
+ * HSA CCompute/APU Affinity structure and definitions
+ */
+#define CRAT_CCOMPUTE_FLAGS_ENABLED    0x00000001
+#define CRAT_CCOMPUTE_FLAGS_RESERVED   0xfffffffe
+
+#define CRAT_CCOMPUTE_RESERVED_LENGTH 16
+
+struct crat_subtype_ccompute {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        processor_id_low;
+       uint8_t         sibling_map[CRAT_SIBLINGMAP_SIZE];
+       uint32_t        apu_size;
+       uint8_t         reserved2[CRAT_CCOMPUTE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA IO Link Affinity structure and definitions
+ */
+#define CRAT_IOLINK_FLAGS_ENABLED      0x00000001
+#define CRAT_IOLINK_FLAGS_COHERENCY    0x00000002
+#define CRAT_IOLINK_FLAGS_RESERVED     0xfffffffc
+
+/*
+ * IO interface types
+ */
+#define CRAT_IOLINK_TYPE_UNDEFINED     0
+#define CRAT_IOLINK_TYPE_HYPERTRANSPORT        1
+#define CRAT_IOLINK_TYPE_PCIEXPRESS    2
+#define CRAT_IOLINK_TYPE_OTHER         3
+#define CRAT_IOLINK_TYPE_MAX           255
+
+#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+struct crat_subtype_iolink {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+       uint32_t        proximity_domain_from;
+       uint32_t        proximity_domain_to;
+       uint8_t         io_interface_type;
+       uint8_t         version_major;
+       uint16_t        version_minor;
+       uint32_t        minimum_latency;
+       uint32_t        maximum_latency;
+       uint32_t        minimum_bandwidth_mbs;
+       uint32_t        maximum_bandwidth_mbs;
+       uint32_t        recommended_transfer_size;
+       uint8_t         reserved2[CRAT_IOLINK_RESERVED_LENGTH];
+};
+
+/*
+ * HSA generic sub-type header
+ */
+
+#define CRAT_SUBTYPE_FLAGS_ENABLED 0x00000001
+
+struct crat_subtype_generic {
+       uint8_t         type;
+       uint8_t         length;
+       uint16_t        reserved;
+       uint32_t        flags;
+};
+
+/*
+ * Component Locality Distance Information Table (CDIT)
+ */
+#define CDIT_OEMID_LENGTH      6
+#define CDIT_OEMTABLEID_LENGTH 8
+
+struct cdit_header {
+       uint32_t        signature;
+       uint32_t        length;
+       uint8_t         revision;
+       uint8_t         checksum;
+       uint8_t         oem_id[CDIT_OEMID_LENGTH];
+       uint8_t         oem_table_id[CDIT_OEMTABLEID_LENGTH];
+       uint32_t        oem_revision;
+       uint32_t        creator_id;
+       uint32_t        creator_revision;
+       uint32_t        total_entries;
+       uint16_t        num_domains;
+       uint8_t         entry[1];
+};
+
+#pragma pack()
+
+#endif /* KFD_CRAT_H_INCLUDED */
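
The crat_subtype_generic header above is what makes a CRAT table walkable: every
subtype starts with the same type/length prefix, so a consumer can iterate the
entries without knowing each subtype's layout up front. A minimal sketch of such
a walk, assuming the header above is included and that buf/len describe an
already-validated subtype area (the dispatch on sub->type is left as a stub,
since the concrete subtype codes are not part of this excerpt):

    /* Sketch: walk CRAT subtypes via the generic type/length prefix. */
    static void walk_crat_subtypes(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + sizeof(struct crat_subtype_generic) <= len) {
                    const struct crat_subtype_generic *sub =
                            (const struct crat_subtype_generic *)(buf + off);

                    /* length covers the whole subtype, header included */
                    if (sub->length < sizeof(*sub) || off + sub->length > len)
                            break;  /* malformed entry: stop walking */

                    if (sub->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
                            /* dispatch on sub->type here, e.g. cast to
                             * struct crat_subtype_cache for a cache entry */
                    }

                    off += sub->length;
            }
    }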
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
new file mode 100644 (file)
index 0000000..43884eb
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/amd-iommu.h>
+#include <linux/bsearch.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+#define MQD_SIZE_ALIGNED 768
+
+static const struct kfd_device_info kaveri_device_info = {
+       .max_pasid_bits = 16,
+       .ih_ring_entry_size = 4 * sizeof(uint32_t),
+       .mqd_size_aligned = MQD_SIZE_ALIGNED
+};
+
+struct kfd_deviceid {
+       unsigned short did;
+       const struct kfd_device_info *device_info;
+};
+
+/* Please keep this sorted by increasing device id. */
+static const struct kfd_deviceid supported_devices[] = {
+       { 0x1304, &kaveri_device_info },        /* Kaveri */
+       { 0x1305, &kaveri_device_info },        /* Kaveri */
+       { 0x1306, &kaveri_device_info },        /* Kaveri */
+       { 0x1307, &kaveri_device_info },        /* Kaveri */
+       { 0x1309, &kaveri_device_info },        /* Kaveri */
+       { 0x130A, &kaveri_device_info },        /* Kaveri */
+       { 0x130B, &kaveri_device_info },        /* Kaveri */
+       { 0x130C, &kaveri_device_info },        /* Kaveri */
+       { 0x130D, &kaveri_device_info },        /* Kaveri */
+       { 0x130E, &kaveri_device_info },        /* Kaveri */
+       { 0x130F, &kaveri_device_info },        /* Kaveri */
+       { 0x1310, &kaveri_device_info },        /* Kaveri */
+       { 0x1311, &kaveri_device_info },        /* Kaveri */
+       { 0x1312, &kaveri_device_info },        /* Kaveri */
+       { 0x1313, &kaveri_device_info },        /* Kaveri */
+       { 0x1315, &kaveri_device_info },        /* Kaveri */
+       { 0x1316, &kaveri_device_info },        /* Kaveri */
+       { 0x1317, &kaveri_device_info },        /* Kaveri */
+       { 0x1318, &kaveri_device_info },        /* Kaveri */
+       { 0x131B, &kaveri_device_info },        /* Kaveri */
+       { 0x131C, &kaveri_device_info },        /* Kaveri */
+       { 0x131D, &kaveri_device_info },        /* Kaveri */
+};
+
+static const struct kfd_device_info *lookup_device_info(unsigned short did)
+{
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+               if (supported_devices[i].did == did) {
+                       BUG_ON(supported_devices[i].device_info == NULL);
+                       return supported_devices[i].device_info;
+               }
+       }
+
+       return NULL;
+}
+
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+{
+       struct kfd_dev *kfd;
+
+       const struct kfd_device_info *device_info =
+                                       lookup_device_info(pdev->device);
+
+       if (!device_info)
+               return NULL;
+
+       kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+       if (!kfd)
+               return NULL;
+
+       kfd->kgd = kgd;
+       kfd->device_info = device_info;
+       kfd->pdev = pdev;
+       kfd->init_complete = false;
+
+       return kfd;
+}
+
+static bool device_iommu_pasid_init(struct kfd_dev *kfd)
+{
+       const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
+                                       AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
+                                       AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+       struct amd_iommu_device_info iommu_info;
+       unsigned int pasid_limit;
+       int err;
+
+       err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+       if (err < 0) {
+               dev_err(kfd_device,
+                       "error getting iommu info. is the iommu enabled?\n");
+               return false;
+       }
+
+       if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+               dev_err(kfd_device, "required iommu flags missing: ats(%i), pri(%i), pasid(%i)\n",
+                      (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+                      (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+                      (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+               return false;
+       }
+
+       pasid_limit = min_t(unsigned int,
+                       (unsigned int)1 << kfd->device_info->max_pasid_bits,
+                       iommu_info.max_pasids);
+       /*
+        * The last pasid is reserved for kernel-queue doorbells;
+        * in the future it might instead be used for a kernel thread.
+        */
+       pasid_limit = min_t(unsigned int,
+                               pasid_limit,
+                               kfd->doorbell_process_limit - 1);
+
+       err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+       if (err < 0) {
+               dev_err(kfd_device, "error initializing iommu device\n");
+               return false;
+       }
+
+       if (!kfd_set_pasid_limit(pasid_limit)) {
+               dev_err(kfd_device, "error setting pasid limit\n");
+               amd_iommu_free_device(kfd->pdev);
+               return false;
+       }
+
+       return true;
+}
+
+static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
+{
+       struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
+
+       if (dev)
+               kfd_unbind_process_from_device(dev, pasid);
+}
+
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+                        const struct kgd2kfd_shared_resources *gpu_resources)
+{
+       unsigned int size;
+
+       kfd->shared_resources = *gpu_resources;
+
+       /* calculate max size of mqds needed for queues */
+       size = max_num_of_processes *
+               max_num_of_queues_per_process *
+               kfd->device_info->mqd_size_aligned;
+
+       /* add another 512KB for all other allocations on gart */
+       size += 512 * 1024;
+
+       if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+               dev_err(kfd_device,
+                       "Error initializing sa manager for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto out;
+       }
+
+       kfd_doorbell_init(kfd);
+
+       if (kfd_topology_add_device(kfd) != 0) {
+               dev_err(kfd_device,
+                       "Error adding device (%x:%x) to topology\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto kfd_topology_add_device_error;
+       }
+
+       if (kfd_interrupt_init(kfd)) {
+               dev_err(kfd_device,
+                       "Error initializing interrupts for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto kfd_interrupt_error;
+       }
+
+       if (!device_iommu_pasid_init(kfd)) {
+               dev_err(kfd_device,
+                       "Error initializing iommuv2 for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto device_iommu_pasid_error;
+       }
+       amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+                                               iommu_pasid_shutdown_callback);
+
+       kfd->dqm = device_queue_manager_init(kfd);
+       if (!kfd->dqm) {
+               dev_err(kfd_device,
+                       "Error initializing queue manager for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto device_queue_manager_error;
+       }
+
+       if (kfd->dqm->start(kfd->dqm) != 0) {
+               dev_err(kfd_device,
+                       "Error starting queue manager for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+               goto dqm_start_error;
+       }
+
+       kfd->init_complete = true;
+       dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+                kfd->pdev->device);
+
+       pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+               sched_policy);
+
+       goto out;
+
+dqm_start_error:
+       device_queue_manager_uninit(kfd->dqm);
+device_queue_manager_error:
+       amd_iommu_free_device(kfd->pdev);
+device_iommu_pasid_error:
+       kfd_interrupt_exit(kfd);
+kfd_interrupt_error:
+       kfd_topology_remove_device(kfd);
+kfd_topology_add_device_error:
+       kfd2kgd->fini_sa_manager(kfd->kgd);
+       dev_err(kfd_device,
+               "device (%x:%x) NOT added due to errors\n",
+               kfd->pdev->vendor, kfd->pdev->device);
+out:
+       return kfd->init_complete;
+}
+
+void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+       if (kfd->init_complete) {
+               device_queue_manager_uninit(kfd->dqm);
+               amd_iommu_free_device(kfd->pdev);
+               kfd_interrupt_exit(kfd);
+               kfd_topology_remove_device(kfd);
+       }
+
+       kfree(kfd);
+}
+
+void kgd2kfd_suspend(struct kfd_dev *kfd)
+{
+       BUG_ON(kfd == NULL);
+
+       if (kfd->init_complete) {
+               kfd->dqm->stop(kfd->dqm);
+               amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+               amd_iommu_free_device(kfd->pdev);
+       }
+}
+
+int kgd2kfd_resume(struct kfd_dev *kfd)
+{
+       unsigned int pasid_limit;
+       int err;
+
+       BUG_ON(kfd == NULL);
+
+       pasid_limit = kfd_get_pasid_limit();
+
+       if (kfd->init_complete) {
+               err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+               if (err < 0)
+                       return -ENXIO;
+               amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+                                               iommu_pasid_shutdown_callback);
+               kfd->dqm->start(kfd->dqm);
+       }
+
+       return 0;
+}
+
+/* This is called directly from KGD at ISR. */
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+       if (kfd->init_complete) {
+               spin_lock(&kfd->interrupt_lock);
+
+               if (kfd->interrupts_active
+                   && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+                       schedule_work(&kfd->interrupt_work);
+
+               spin_unlock(&kfd->interrupt_lock);
+       }
+}
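
A note on the PASID clamp in device_iommu_pasid_init() above: the final limit is
the smallest of three independent ceilings, and the doorbell ceiling loses one
slot because the last PASID is reserved for kernel-queue doorbells. A standalone
sketch with made-up numbers (none of the values below come from real hardware):

    /* Illustration only: the three-way clamp, with invented inputs. */
    static unsigned int example_pasid_limit(void)
    {
            unsigned int max_pasid_bits         = 16;    /* device info     */
            unsigned int iommu_max_pasids       = 32768; /* IOMMU (made up) */
            unsigned int doorbell_process_limit = 512;   /* made up         */
            unsigned int pasid_limit = 1U << max_pasid_bits;   /* 65536 */

            if (pasid_limit > iommu_max_pasids)
                    pasid_limit = iommu_max_pasids;            /* 32768 */
            /* the last pasid is reserved for kernel-queue doorbells */
            if (pasid_limit > doorbell_process_limit - 1)
                    pasid_limit = doorbell_process_limit - 1;  /* 511 */

            return pasid_limit;
    }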
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
new file mode 100644 (file)
index 0000000..924e90c
--- /dev/null
@@ -0,0 +1,1062 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "kfd_kernel_queue.h"
+#include "../../radeon/cik_reg.h"
+
+/* Size of the per-pipe EOP queue */
+#define CIK_HPD_EOP_BYTES_LOG2 11
+#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+
+static bool is_mem_initialized;
+
+static int init_memory(struct device_queue_manager *dqm);
+static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
+                                       unsigned int pasid, unsigned int vmid);
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+                                       struct queue *q,
+                                       struct qcm_process_device *qpd);
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+
+
+static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm || !dqm->dev);
+       return dqm->dev->shared_resources.compute_pipe_count;
+}
+
+static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm);
+       return dqm->dev->shared_resources.first_compute_pipe;
+}
+
+static inline unsigned int get_pipes_num_cpsch(void)
+{
+       return PIPE_PER_ME_CP_SCHEDULING;
+}
+
+static inline unsigned int
+get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+{
+       uint32_t nybble;
+
+       nybble = (pdd->lds_base >> 60) & 0x0E;
+
+       return nybble;
+}
+
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+{
+       unsigned int shared_base;
+
+       shared_base = (pdd->lds_base >> 16) & 0xFF;
+
+       return shared_base;
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
+static void init_process_memory(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd)
+{
+       struct kfd_process_device *pdd;
+       unsigned int temp;
+
+       BUG_ON(!dqm || !qpd);
+
+       pdd = qpd_to_pdd(qpd);
+
+       /* check if sh_mem_config register already configured */
+       if (qpd->sh_mem_config == 0) {
+               qpd->sh_mem_config =
+                       ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+                       DEFAULT_MTYPE(MTYPE_NONCACHED) |
+                       APE1_MTYPE(MTYPE_NONCACHED);
+               qpd->sh_mem_ape1_limit = 0;
+               qpd->sh_mem_ape1_base = 0;
+       }
+
+       if (qpd->pqm->process->is_32bit_user_mode) {
+               temp = get_sh_mem_bases_32(pdd);
+               qpd->sh_mem_bases = SHARED_BASE(temp);
+               qpd->sh_mem_config |= PTR32;
+       } else {
+               temp = get_sh_mem_bases_nybble_64(pdd);
+               qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+       }
+
+       pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+               qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+}
+
+static void program_sh_mem_settings(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd)
+{
+       return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+                                               qpd->sh_mem_config,
+                                               qpd->sh_mem_ape1_base,
+                                               qpd->sh_mem_ape1_limit,
+                                               qpd->sh_mem_bases);
+}
+
+static int allocate_vmid(struct device_queue_manager *dqm,
+                       struct qcm_process_device *qpd,
+                       struct queue *q)
+{
+       int bit, allocated_vmid;
+
+       if (dqm->vmid_bitmap == 0)
+               return -ENOMEM;
+
+       bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
+       clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+
+       /* Kaveri KFD VMIDs start from VMID 8 */
+       allocated_vmid = bit + KFD_VMID_START_OFFSET;
+       pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+       qpd->vmid = allocated_vmid;
+       q->properties.vmid = allocated_vmid;
+
+       set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
+       program_sh_mem_settings(dqm, qpd);
+
+       return 0;
+}
+
+static void deallocate_vmid(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd,
+                               struct queue *q)
+{
+       int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+
+       set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+       qpd->vmid = 0;
+       q->properties.vmid = 0;
+}
+
+static int create_queue_nocpsch(struct device_queue_manager *dqm,
+                               struct queue *q,
+                               struct qcm_process_device *qpd,
+                               int *allocated_vmid)
+{
+       int retval;
+
+       BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
+
+       pr_debug("kfd: In func %s\n", __func__);
+       print_queue(q);
+
+       mutex_lock(&dqm->lock);
+
+       if (list_empty(&qpd->queues_list)) {
+               retval = allocate_vmid(dqm, qpd, q);
+               if (retval != 0) {
+                       mutex_unlock(&dqm->lock);
+                       return retval;
+               }
+       }
+       *allocated_vmid = qpd->vmid;
+       q->properties.vmid = qpd->vmid;
+
+       retval = create_compute_queue_nocpsch(dqm, q, qpd);
+
+       if (retval != 0) {
+               if (list_empty(&qpd->queues_list)) {
+                       deallocate_vmid(dqm, qpd, q);
+                       *allocated_vmid = 0;
+               }
+               mutex_unlock(&dqm->lock);
+               return retval;
+       }
+
+       list_add(&q->list, &qpd->queues_list);
+       dqm->queue_count++;
+
+       mutex_unlock(&dqm->lock);
+       return 0;
+}
+
+static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
+{
+       bool set;
+       int pipe, bit;
+
+       set = false;
+
+       for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
+                       pipe = (pipe + 1) % get_pipes_num(dqm)) {
+               if (dqm->allocated_queues[pipe] != 0) {
+                       bit = find_first_bit(
+                               (unsigned long *)&dqm->allocated_queues[pipe],
+                               QUEUES_PER_PIPE);
+
+                       clear_bit(bit,
+                               (unsigned long *)&dqm->allocated_queues[pipe]);
+                       q->pipe = pipe;
+                       q->queue = bit;
+                       set = true;
+                       break;
+               }
+       }
+
+       if (set == false)
+               return -EBUSY;
+
+       pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
+                               __func__, q->pipe, q->queue);
+       /* horizontal hqd allocation */
+       dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
+
+       return 0;
+}
+
+static inline void deallocate_hqd(struct device_queue_manager *dqm,
+                               struct queue *q)
+{
+       set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
+}
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+                                       struct queue *q,
+                                       struct qcm_process_device *qpd)
+{
+       int retval;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || !q || !qpd);
+
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+       if (mqd == NULL)
+               return -ENOMEM;
+
+       retval = allocate_hqd(dqm, q);
+       if (retval != 0)
+               return retval;
+
+       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+                               &q->gart_mqd_addr, &q->properties);
+       if (retval != 0) {
+               deallocate_hqd(dqm, q);
+               return retval;
+       }
+
+       return 0;
+}
+
+static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd,
+                               struct queue *q)
+{
+       int retval;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || !q || !q->mqd || !qpd);
+
+       retval = 0;
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mutex_lock(&dqm->lock);
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+       if (mqd == NULL) {
+               retval = -ENOMEM;
+               goto out;
+       }
+
+       retval = mqd->destroy_mqd(mqd, q->mqd,
+                               KFD_PREEMPT_TYPE_WAVEFRONT,
+                               QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+                               q->pipe, q->queue);
+
+       if (retval != 0)
+               goto out;
+
+       deallocate_hqd(dqm, q);
+
+       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+       list_del(&q->list);
+       if (list_empty(&qpd->queues_list))
+               deallocate_vmid(dqm, qpd, q);
+       dqm->queue_count--;
+out:
+       mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+{
+       int retval;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || !q || !q->mqd);
+
+       mutex_lock(&dqm->lock);
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+       if (mqd == NULL) {
+               mutex_unlock(&dqm->lock);
+               return -ENOMEM;
+       }
+
+       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+       if (q->properties.is_active == true)
+               dqm->queue_count++;
+       else
+               dqm->queue_count--;
+
+       if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+               retval = execute_queues_cpsch(dqm, false);
+
+       mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static struct mqd_manager *get_mqd_manager_nocpsch(
+               struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+{
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+
+       pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+
+       mqd = dqm->mqds[type];
+       if (!mqd) {
+               mqd = mqd_manager_init(type, dqm->dev);
+               if (mqd == NULL)
+                       pr_err("kfd: mqd manager is NULL\n");
+               dqm->mqds[type] = mqd;
+       }
+
+       return mqd;
+}
+
+static int register_process_nocpsch(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd)
+{
+       struct device_process_node *n;
+
+       BUG_ON(!dqm || !qpd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+       if (!n)
+               return -ENOMEM;
+
+       n->qpd = qpd;
+
+       mutex_lock(&dqm->lock);
+       list_add(&n->list, &dqm->queues);
+
+       init_process_memory(dqm, qpd);
+       dqm->processes_count++;
+
+       mutex_unlock(&dqm->lock);
+
+       return 0;
+}
+
+static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd)
+{
+       int retval;
+       struct device_process_node *cur, *next;
+
+       BUG_ON(!dqm || !qpd);
+
+       BUG_ON(!list_empty(&qpd->queues_list));
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       retval = 0;
+       mutex_lock(&dqm->lock);
+
+       list_for_each_entry_safe(cur, next, &dqm->queues, list) {
+               if (qpd == cur->qpd) {
+                       list_del(&cur->list);
+                       kfree(cur);
+                       dqm->processes_count--;
+                       goto out;
+               }
+       }
+       /* qpd not found in dqm list */
+       retval = 1;
+out:
+       mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static int
+set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
+                       unsigned int vmid)
+{
+       uint32_t pasid_mapping;
+
+       pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+                                               ATC_VMID_PASID_MAPPING_VALID;
+       return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+                                               vmid);
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+       /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+        * scratch and GPUVM apertures.
+        * The hardware fills in the remaining 59 bits according to the
+        * following pattern:
+        * LDS:         X0000000'00000000 - X0000001'00000000 (4GB)
+        * Scratch:     X0000001'00000000 - X0000002'00000000 (4GB)
+        * GPUVM:       Y0010000'00000000 - Y0020000'00000000 (1TB)
+        *
+        * (where X/Y is the configurable nybble with the low-bit 0)
+        *
+        * LDS and scratch will have the same top nybble programmed in the
+        * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+        * GPUVM can have a different top nybble programmed in the
+        * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+        * We don't bother to support different top nybbles
+        * for LDS/Scratch and GPUVM.
+        */
+
+       BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+               top_address_nybble == 0);
+
+       return PRIVATE_BASE(top_address_nybble << 12) |
+                       SHARED_BASE(top_address_nybble << 12);
+}
+
+static int init_memory(struct device_queue_manager *dqm)
+{
+       int i, retval;
+
+       for (i = 8; i < 16; i++)
+               set_pasid_vmid_mapping(dqm, 0, i);
+
+       retval = kfd2kgd->init_memory(dqm->dev->kgd);
+       if (retval == 0)
+               is_mem_initialized = true;
+       return retval;
+}
+
+
+static int init_pipelines(struct device_queue_manager *dqm,
+                       unsigned int pipes_num, unsigned int first_pipe)
+{
+       void *hpdptr;
+       struct mqd_manager *mqd;
+       unsigned int i, err, inx;
+       uint64_t pipe_hpd_addr;
+
+       BUG_ON(!dqm || !dqm->dev);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       /*
+        * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
+        * The driver never accesses this memory after zeroing it.
+        * It doesn't even have to be saved/restored on suspend/resume
+        * because it contains no data when there are no active queues.
+        */
+
+       err = kfd2kgd->allocate_mem(dqm->dev->kgd,
+                               CIK_HPD_EOP_BYTES * pipes_num,
+                               PAGE_SIZE,
+                               KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                               (struct kgd_mem **) &dqm->pipeline_mem);
+
+       if (err) {
+               pr_err("kfd: error allocating vidmem for %d pipes\n",
+                       pipes_num);
+               return -ENOMEM;
+       }
+
+       hpdptr = dqm->pipeline_mem->cpu_ptr;
+       dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
+
+       memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
+
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+       if (mqd == NULL) {
+               kfd2kgd->free_mem(dqm->dev->kgd,
+                               (struct kgd_mem *) dqm->pipeline_mem);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < pipes_num; i++) {
+               inx = i + first_pipe;
+               pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
+               pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
+               /* = log2(bytes/4)-1 */
+               kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+                               CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
+       }
+
+       return 0;
+}
+
+
+static int init_scheduler(struct device_queue_manager *dqm)
+{
+       int retval;
+
+       BUG_ON(!dqm);
+
+       pr_debug("kfd: In %s\n", __func__);
+
+       retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+       if (retval != 0)
+               return retval;
+
+       retval = init_memory(dqm);
+
+       return retval;
+}
+
+static int initialize_nocpsch(struct device_queue_manager *dqm)
+{
+       int i;
+
+       BUG_ON(!dqm);
+
+       pr_debug("kfd: In func %s num of pipes: %d\n",
+                       __func__, get_pipes_num(dqm));
+
+       mutex_init(&dqm->lock);
+       INIT_LIST_HEAD(&dqm->queues);
+       dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+       dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
+                                       sizeof(unsigned int), GFP_KERNEL);
+       if (!dqm->allocated_queues) {
+               mutex_destroy(&dqm->lock);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < get_pipes_num(dqm); i++)
+               dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
+
+       dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+
+       init_scheduler(dqm);
+       return 0;
+}
+
+static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+{
+       int i;
+
+       BUG_ON(!dqm);
+
+       BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+
+       kfree(dqm->allocated_queues);
+       for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+               kfree(dqm->mqds[i]);
+       mutex_destroy(&dqm->lock);
+       kfd2kgd->free_mem(dqm->dev->kgd,
+                       (struct kgd_mem *) dqm->pipeline_mem);
+}
+
+static int start_nocpsch(struct device_queue_manager *dqm)
+{
+       return 0;
+}
+
+static int stop_nocpsch(struct device_queue_manager *dqm)
+{
+       return 0;
+}
+
+/*
+ * Device Queue Manager implementation for cp scheduler
+ */
+
+static int set_sched_resources(struct device_queue_manager *dqm)
+{
+       struct scheduling_resources res;
+       unsigned int queue_num, queue_mask;
+
+       BUG_ON(!dqm);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
+       queue_mask = (1 << queue_num) - 1;
+       res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
+       res.vmid_mask <<= KFD_VMID_START_OFFSET;
+       res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
+       res.gws_mask = res.oac_mask = res.gds_heap_base =
+                                               res.gds_heap_size = 0;
+
+       pr_debug("kfd: scheduling resources:\n"
+                       "      vmid mask: 0x%8X\n"
+                       "      queue mask: 0x%8llX\n",
+                       res.vmid_mask, res.queue_mask);
+
+       return pm_send_set_resources(&dqm->packets, &res);
+}
+
+static int initialize_cpsch(struct device_queue_manager *dqm)
+{
+       int retval;
+
+       BUG_ON(!dqm);
+
+       pr_debug("kfd: In func %s num of pipes: %d\n",
+                       __func__, get_pipes_num_cpsch());
+
+       mutex_init(&dqm->lock);
+       INIT_LIST_HEAD(&dqm->queues);
+       dqm->queue_count = dqm->processes_count = 0;
+       dqm->active_runlist = false;
+       retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
+       if (retval != 0)
+               goto fail_init_pipelines;
+
+       return 0;
+
+fail_init_pipelines:
+       mutex_destroy(&dqm->lock);
+       return retval;
+}
+
+static int start_cpsch(struct device_queue_manager *dqm)
+{
+       struct device_process_node *node;
+       int retval;
+
+       BUG_ON(!dqm);
+
+       retval = 0;
+
+       retval = pm_init(&dqm->packets, dqm);
+       if (retval != 0)
+               goto fail_packet_manager_init;
+
+       retval = set_sched_resources(dqm);
+       if (retval != 0)
+               goto fail_set_sched_resources;
+
+       pr_debug("kfd: allocating fence memory\n");
+
+       /* allocate fence memory on the gart */
+       retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
+                                       sizeof(*dqm->fence_addr),
+                                       32,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &dqm->fence_mem);
+
+       if (retval != 0)
+               goto fail_allocate_vidmem;
+
+       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+       list_for_each_entry(node, &dqm->queues, list)
+               if (node->qpd->pqm->process && dqm->dev)
+                       kfd_bind_process_to_device(dqm->dev,
+                                               node->qpd->pqm->process);
+
+       execute_queues_cpsch(dqm, true);
+
+       return 0;
+fail_allocate_vidmem:
+fail_set_sched_resources:
+       pm_uninit(&dqm->packets);
+fail_packet_manager_init:
+       return retval;
+}
+
+static int stop_cpsch(struct device_queue_manager *dqm)
+{
+       struct device_process_node *node;
+       struct kfd_process_device *pdd;
+
+       BUG_ON(!dqm);
+
+       destroy_queues_cpsch(dqm, true);
+
+       list_for_each_entry(node, &dqm->queues, list) {
+               pdd = qpd_to_pdd(node->qpd);
+               pdd->bound = false;
+       }
+       kfd2kgd->free_mem(dqm->dev->kgd,
+                       (struct kgd_mem *) dqm->fence_mem);
+       pm_uninit(&dqm->packets);
+
+       return 0;
+}
+
+static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+                                       struct kernel_queue *kq,
+                                       struct qcm_process_device *qpd)
+{
+       BUG_ON(!dqm || !kq || !qpd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mutex_lock(&dqm->lock);
+       list_add(&kq->list, &qpd->priv_queue_list);
+       dqm->queue_count++;
+       qpd->is_debug = true;
+       execute_queues_cpsch(dqm, false);
+       mutex_unlock(&dqm->lock);
+
+       return 0;
+}
+
+static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+                                       struct kernel_queue *kq,
+                                       struct qcm_process_device *qpd)
+{
+       BUG_ON(!dqm || !kq);
+
+       pr_debug("kfd: In %s\n", __func__);
+
+       mutex_lock(&dqm->lock);
+       destroy_queues_cpsch(dqm, false);
+       list_del(&kq->list);
+       dqm->queue_count--;
+       qpd->is_debug = false;
+       execute_queues_cpsch(dqm, false);
+       mutex_unlock(&dqm->lock);
+}
+
+static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+                       struct qcm_process_device *qpd, int *allocate_vmid)
+{
+       int retval;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || !q || !qpd);
+
+       retval = 0;
+
+       if (allocate_vmid)
+               *allocate_vmid = 0;
+
+       mutex_lock(&dqm->lock);
+
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+       if (mqd == NULL) {
+               mutex_unlock(&dqm->lock);
+               return -ENOMEM;
+       }
+
+       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+                               &q->gart_mqd_addr, &q->properties);
+       if (retval != 0)
+               goto out;
+
+       list_add(&q->list, &qpd->queues_list);
+       if (q->properties.is_active) {
+               dqm->queue_count++;
+               retval = execute_queues_cpsch(dqm, false);
+       }
+
+out:
+       mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static int fence_wait_timeout(unsigned int *fence_addr,
+                               unsigned int fence_value,
+                               unsigned long timeout)
+{
+       BUG_ON(!fence_addr);
+       timeout += jiffies;
+
+       while (*fence_addr != fence_value) {
+               if (time_after(jiffies, timeout)) {
+                       pr_err("kfd: qcm fence wait loop timeout expired\n");
+                       return -ETIME;
+               }
+               cpu_relax();
+       }
+
+       return 0;
+}
+
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+       int retval;
+
+       BUG_ON(!dqm);
+
+       retval = 0;
+
+       if (lock)
+               mutex_lock(&dqm->lock);
+       if (dqm->active_runlist == false)
+               goto out;
+       retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
+                       KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
+       if (retval != 0)
+               goto out;
+
+       *dqm->fence_addr = KFD_FENCE_INIT;
+       pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
+                               KFD_FENCE_COMPLETED);
+       /* wait for the preemption fence; give up after the timeout */
+       fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+                               QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
+       pm_release_ib(&dqm->packets);
+       dqm->active_runlist = false;
+
+out:
+       if (lock)
+               mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+       int retval;
+
+       BUG_ON(!dqm);
+
+       if (lock)
+               mutex_lock(&dqm->lock);
+
+       retval = destroy_queues_cpsch(dqm, false);
+       if (retval != 0) {
+               pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
+               goto out;
+       }
+
+       if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
+               retval = 0;
+               goto out;
+       }
+
+       if (dqm->active_runlist) {
+               retval = 0;
+               goto out;
+       }
+
+       retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+       if (retval != 0) {
+               pr_err("kfd: failed to execute runlist\n");
+               goto out;
+       }
+       dqm->active_runlist = true;
+
+out:
+       if (lock)
+               mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd,
+                               struct queue *q)
+{
+       int retval;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dqm || !qpd || !q);
+
+       retval = 0;
+
+       /* remove queue from list to prevent rescheduling after preemption */
+       mutex_lock(&dqm->lock);
+
+       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+       if (!mqd) {
+               retval = -ENOMEM;
+               goto failed;
+       }
+
+       list_del(&q->list);
+       dqm->queue_count--;
+
+       execute_queues_cpsch(dqm, false);
+
+       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+       mutex_unlock(&dqm->lock);
+
+       return 0;
+
+failed:
+       mutex_unlock(&dqm->lock);
+       return retval;
+}
+
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
+static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+                                  struct qcm_process_device *qpd,
+                                  enum cache_policy default_policy,
+                                  enum cache_policy alternate_policy,
+                                  void __user *alternate_aperture_base,
+                                  uint64_t alternate_aperture_size)
+{
+       uint32_t default_mtype;
+       uint32_t ape1_mtype;
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mutex_lock(&dqm->lock);
+
+       if (alternate_aperture_size == 0) {
+               /* base > limit disables APE1 */
+               qpd->sh_mem_ape1_base = 1;
+               qpd->sh_mem_ape1_limit = 0;
+       } else {
+               /*
+                * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+                *                      SH_MEM_APE1_BASE[31:0], 0x0000 }
+                * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+                *                      SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+                * Verify that the base and size parameters can be
+                * represented in this format and convert them.
+                * Additionally restrict APE1 to user-mode addresses.
+                */
+
+               uint64_t base = (uintptr_t)alternate_aperture_base;
+               uint64_t limit = base + alternate_aperture_size - 1;
+
+               if (limit <= base)
+                       goto out;
+
+               if ((base & APE1_FIXED_BITS_MASK) != 0)
+                       goto out;
+
+               if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+                       goto out;
+
+               qpd->sh_mem_ape1_base = base >> 16;
+               qpd->sh_mem_ape1_limit = limit >> 16;
+       }
+
+       default_mtype = (default_policy == cache_policy_coherent) ?
+                       MTYPE_NONCACHED :
+                       MTYPE_CACHED;
+
+       ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+                       MTYPE_NONCACHED :
+                       MTYPE_CACHED;
+
+       qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
+                       | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+                       | DEFAULT_MTYPE(default_mtype)
+                       | APE1_MTYPE(ape1_mtype);
+
+       if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
+               program_sh_mem_settings(dqm, qpd);
+
+       pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+               qpd->sh_mem_config, qpd->sh_mem_ape1_base,
+               qpd->sh_mem_ape1_limit);
+
+       mutex_unlock(&dqm->lock);
+       return true;
+
+out:
+       mutex_unlock(&dqm->lock);
+       return false;
+}
+
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+{
+       struct device_queue_manager *dqm;
+
+       BUG_ON(!dev);
+
+       dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+       if (!dqm)
+               return NULL;
+
+       dqm->dev = dev;
+       switch (sched_policy) {
+       case KFD_SCHED_POLICY_HWS:
+       case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
+               /* initialize dqm for cp scheduling */
+               dqm->create_queue = create_queue_cpsch;
+               dqm->initialize = initialize_cpsch;
+               dqm->start = start_cpsch;
+               dqm->stop = stop_cpsch;
+               dqm->destroy_queue = destroy_queue_cpsch;
+               dqm->update_queue = update_queue;
+               dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+               dqm->register_process = register_process_nocpsch;
+               dqm->unregister_process = unregister_process_nocpsch;
+               dqm->uninitialize = uninitialize_nocpsch;
+               dqm->create_kernel_queue = create_kernel_queue_cpsch;
+               dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
+               dqm->set_cache_memory_policy = set_cache_memory_policy;
+               break;
+       case KFD_SCHED_POLICY_NO_HWS:
+               /* initialize dqm for no cp scheduling */
+               dqm->start = start_nocpsch;
+               dqm->stop = stop_nocpsch;
+               dqm->create_queue = create_queue_nocpsch;
+               dqm->destroy_queue = destroy_queue_nocpsch;
+               dqm->update_queue = update_queue;
+               dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+               dqm->register_process = register_process_nocpsch;
+               dqm->unregister_process = unregister_process_nocpsch;
+               dqm->initialize = initialize_nocpsch;
+               dqm->uninitialize = uninitialize_nocpsch;
+               dqm->set_cache_memory_policy = set_cache_memory_policy;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       if (dqm->initialize(dqm) != 0) {
+               kfree(dqm);
+               return NULL;
+       }
+
+       return dqm;
+}
+
+void device_queue_manager_uninit(struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm);
+
+       dqm->uninitialize(dqm);
+       kfree(dqm);
+}
+
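
One way to read the APE1 checks in set_cache_memory_policy() above: the hardware
stores only bits [47:16] of the base and of the inclusive limit, so a valid base
must have zero low 16 bits (and zero high bits, to stay in user mode) while a
valid limit must end in 0xFFFF. A self-contained user-space sketch with an
illustrative 64K aperture (the addresses are chosen for the example only):

    #include <assert.h>
    #include <stdint.h>

    #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
    #define APE1_LIMIT_ALIGNMENT 0xFFFF

    int main(void)
    {
            /* a 64K aperture at a 64K-aligned user-mode base */
            uint64_t base  = 0x0000100000000000ULL;
            uint64_t limit = base + 0x10000 - 1;   /* inclusive limit */

            assert(limit > base);
            assert((base & APE1_FIXED_BITS_MASK) == 0);
            assert((limit & APE1_FIXED_BITS_MASK) == APE1_LIMIT_ALIGNMENT);

            /* what the driver would then program, per the code above */
            uint32_t ape1_base  = base >> 16;      /* 0x10000000 */
            uint32_t ape1_limit = limit >> 16;     /* 0x10000000 */

            (void)ape1_base;
            (void)ape1_limit;
            return 0;
    }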
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
new file mode 100644 (file)
index 0000000..c3f189e
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
+#define KFD_DEVICE_QUEUE_MANAGER_H_
+
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS       (500)
+#define QUEUES_PER_PIPE                                (8)
+#define PIPE_PER_ME_CP_SCHEDULING              (3)
+#define CIK_VMID_NUM                           (8)
+#define KFD_VMID_START_OFFSET                  (8)
+#define VMID_PER_DEVICE                                CIK_VMID_NUM
+#define KFD_DQM_FIRST_PIPE                     (0)
+
+struct device_process_node {
+       struct qcm_process_device *qpd;
+       struct list_head list;
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * @create_queue: Queue creation routine.
+ *
+ * @destroy_queue: Queue destruction routine.
+ *
+ * @update_queue: Queue update routine.
+ *
+ * @get_mqd_manager: Returns the mqd manager according to the mqd type.
+ *
+ * @execute_queues: Dispatches the queues list to the H/W.
+ *
+ * @register_process: This routine associates a specific process with a
+ * device.
+ *
+ * @unregister_process: Destroys the association between a process and a
+ * device.
+ *
+ * @initialize: Initializes the pipelines and memory module for that device.
+ *
+ * @start: Initializes the resources/modules the device needs for queue
+ * execution. This function is called on device initialization and on
+ * resume from system suspend.
+ *
+ * @stop: Stops execution of all the active queues running on the H/W;
+ * called on system suspend.
+ *
+ * @uninitialize: Destroys all the device queue manager resources allocated
+ * in the initialize routine.
+ *
+ * @create_kernel_queue: Creates a kernel queue. Used for the debug queue.
+ *
+ * @destroy_kernel_queue: Destroys a kernel queue. Used for the debug queue.
+ *
+ * @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for
+ * the memory apertures.
+ *
+ * This struct is a base class for the kfd queue scheduler at the device
+ * level. It exposes the basic operations for queue creation and destruction,
+ * hides the scheduling mode of the driver and the specific implementation
+ * of the concrete device, and is the only class in the queue scheduler that
+ * configures the H/W.
+ */
+
+struct device_queue_manager {
+       int     (*create_queue)(struct device_queue_manager *dqm,
+                               struct queue *q,
+                               struct qcm_process_device *qpd,
+                               int *allocate_vmid);
+       int     (*destroy_queue)(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd,
+                               struct queue *q);
+       int     (*update_queue)(struct device_queue_manager *dqm,
+                               struct queue *q);
+
+       struct mqd_manager * (*get_mqd_manager)
+                                       (struct device_queue_manager *dqm,
+                                       enum KFD_MQD_TYPE type);
+
+       int     (*register_process)(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd);
+       int     (*unregister_process)(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd);
+       int     (*initialize)(struct device_queue_manager *dqm);
+       int     (*start)(struct device_queue_manager *dqm);
+       int     (*stop)(struct device_queue_manager *dqm);
+       void    (*uninitialize)(struct device_queue_manager *dqm);
+       int     (*create_kernel_queue)(struct device_queue_manager *dqm,
+                                       struct kernel_queue *kq,
+                                       struct qcm_process_device *qpd);
+       void    (*destroy_kernel_queue)(struct device_queue_manager *dqm,
+                                       struct kernel_queue *kq,
+                                       struct qcm_process_device *qpd);
+       bool    (*set_cache_memory_policy)(struct device_queue_manager *dqm,
+                                          struct qcm_process_device *qpd,
+                                          enum cache_policy default_policy,
+                                          enum cache_policy alternate_policy,
+                                          void __user *alternate_aperture_base,
+                                          uint64_t alternate_aperture_size);
+
+
+       struct mqd_manager      *mqds[KFD_MQD_TYPE_MAX];
+       struct packet_manager   packets;
+       struct kfd_dev          *dev;
+       struct mutex            lock;
+       struct list_head        queues;
+       unsigned int            processes_count;
+       unsigned int            queue_count;
+       unsigned int            next_pipe_to_allocate;
+       unsigned int            *allocated_queues;
+       unsigned int            vmid_bitmap;
+       uint64_t                pipelines_addr;
+       struct kfd_mem_obj      *pipeline_mem;
+       uint64_t                fence_gpu_addr;
+       unsigned int            *fence_addr;
+       struct kfd_mem_obj      *fence_mem;
+       bool                    active_runlist;
+};
+
+
+
+#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
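
For orientation, this is the call pattern the ops table supports once
device_queue_manager_init() has filled it in for the active sched_policy. A
hedged sketch only: error paths are trimmed and the kfd/q/qpd objects are
assumed to be prepared elsewhere in the driver:

    /* Sketch: the lifecycle a caller drives through the ops table. */
    static int example_dqm_lifecycle(struct kfd_dev *kfd, struct queue *q,
                                     struct qcm_process_device *qpd)
    {
            struct device_queue_manager *dqm;
            int vmid;

            dqm = device_queue_manager_init(kfd);  /* picks cpsch/nocpsch */
            if (!dqm)
                    return -ENOMEM;

            dqm->start(dqm);                       /* resources for execution */
            dqm->register_process(dqm, qpd);       /* associate the process   */
            dqm->create_queue(dqm, q, qpd, &vmid); /* schedule a user queue   */

            /* ... queue runs; update_queue() as its properties change ... */

            dqm->destroy_queue(dqm, qpd, q);
            dqm->unregister_process(dqm, qpd);
            dqm->stop(dqm);                        /* e.g. on suspend */
            device_queue_manager_uninit(dqm);
            return 0;
    }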
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
new file mode 100644 (file)
index 0000000..b5791a5
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * This extension supports kernel-level doorbell management for
+ * kernel queues.
+ * The last doorbell page is reserved for kernel queues, which
+ * ensures that no user process can get access to the kernel
+ * doorbell page.
+ */
+static DEFINE_MUTEX(doorbell_mutex);
+static unsigned long doorbell_available_index[
+       DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
+
+#define KERNEL_DOORBELL_PASID 1
+#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
+
+/*
+ * Each device exposes a doorbell aperture, a PCI MMIO aperture that
+ * receives 32-bit writes that are passed to queues as wptr values.
+ * The doorbells are intended to be written by applications as part
+ * of queueing work on user-mode queues.
+ * We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
+ * We map the doorbell address space into user-mode when a process creates
+ * its first queue on each device.
+ * Although the mapping is done by KFD, it is equivalent to an mmap of
+ * /dev/kfd with the particular device encoded in the mmap offset.
+ * There will be other uses for mmap of /dev/kfd, so only a range of
+ * offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
+ */
+
+/* # of doorbell bytes allocated for each process. */
+static inline size_t doorbell_process_allocation(void)
+{
+       return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES *
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+                       PAGE_SIZE);
+}
+
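
The arithmetic in doorbell_process_allocation() is worth making concrete. Each
doorbell is 4 bytes; assuming, purely for illustration, 1024 queues per process
and 4 KiB pages (neither constant is shown in this excerpt), the per-process
allocation works out to exactly one page:

    /* Worked example with assumed constants:
     *   KFD_SIZE_OF_DOORBELL_IN_BYTES     = 4
     *   KFD_MAX_NUM_OF_QUEUES_PER_PROCESS = 1024   (assumption)
     *   PAGE_SIZE                         = 4096   (assumption)
     *
     *   4 * 1024 = 4096 bytes; roundup(4096, 4096) = 4096,
     *   so each process owns one page-aligned page of doorbells.
     * A 2 MiB doorbell aperture would then yield
     *   2 MiB / 4 KiB = 512 per-process slots for kfd_doorbell_init()
     *   to hand out (its doorbell_process_limit). */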
+/* Doorbell calculations for device init. */
+void kfd_doorbell_init(struct kfd_dev *kfd)
+{
+       size_t doorbell_start_offset;
+       size_t doorbell_aperture_size;
+       size_t doorbell_process_limit;
+
+       /*
+        * We start with calculations in bytes because the input data might
+        * only be byte-aligned.
+        * Only after we have done the rounding can we assume any alignment.
+        */
+
+       doorbell_start_offset =
+                       roundup(kfd->shared_resources.doorbell_start_offset,
+                                       doorbell_process_allocation());
+
+       doorbell_aperture_size =
+                       rounddown(kfd->shared_resources.doorbell_aperture_size,
+                                       doorbell_process_allocation());
+
+       if (doorbell_aperture_size > doorbell_start_offset)
+               doorbell_process_limit =
+                       (doorbell_aperture_size - doorbell_start_offset) /
+                                               doorbell_process_allocation();
+       else
+               doorbell_process_limit = 0;
+
+       kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
+                               doorbell_start_offset;
+
+       kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+       kfd->doorbell_process_limit = doorbell_process_limit - 1;
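+       /*
+        * Note: the stored limit is the highest usable process index
+        * (count - 1); the calculation above assumes the aperture holds
+        * at least one process allocation.
+        */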
+
+       kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
+                                               doorbell_process_allocation());
+
+       BUG_ON(!kfd->doorbell_kernel_ptr);
+
+       pr_debug("kfd: doorbell initialization:\n");
+       pr_debug("kfd: doorbell base           == 0x%08lX\n",
+                       (uintptr_t)kfd->doorbell_base);
+
+       pr_debug("kfd: doorbell_id_offset      == 0x%08lX\n",
+                       kfd->doorbell_id_offset);
+
+       pr_debug("kfd: doorbell_process_limit  == 0x%08lX\n",
+                       doorbell_process_limit);
+
+       pr_debug("kfd: doorbell_kernel_offset  == 0x%08lX\n",
+                       (uintptr_t)kfd->doorbell_base);
+
+       pr_debug("kfd: doorbell aperture size  == 0x%08lX\n",
+                       kfd->shared_resources.doorbell_aperture_size);
+
+       pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+                       (uintptr_t)kfd->doorbell_kernel_ptr);
+}
+
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
+{
+       phys_addr_t address;
+       struct kfd_dev *dev;
+
+       /*
+        * For simplicity we only allow mapping of the entire doorbell
+        * allocation of a single device & process.
+        */
+       if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
+               return -EINVAL;
+
+       /* Find kfd device according to gpu id */
+       dev = kfd_device_by_id(vma->vm_pgoff);
+       if (dev == NULL)
+               return -EINVAL;
+
+       /* Find if pdd exists for combination of process and gpu id */
+       if (!kfd_get_process_device_data(dev, process, 0))
+               return -EINVAL;
+
+       /* Calculate physical address of doorbell */
+       address = kfd_get_process_doorbells(dev, process);
+
+       vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+                               VM_DONTDUMP | VM_PFNMAP;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
+                "     target user address == 0x%08llX\n"
+                "     physical address    == 0x%08llX\n"
+                "     vm_flags            == 0x%04lX\n"
+                "     size                == 0x%04lX\n",
+                (unsigned long long) vma->vm_start, address, vma->vm_flags,
+                doorbell_process_allocation());
+
+       return io_remap_pfn_range(vma,
+                               vma->vm_start,
+                               address >> PAGE_SHIFT,
+                               doorbell_process_allocation(),
+                               vma->vm_page_prot);
+}
+
+
+/* get kernel iomem pointer for a doorbell */
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+                                       unsigned int *doorbell_off)
+{
+       u32 inx;
+
+       BUG_ON(!kfd || !doorbell_off);
+
+       mutex_lock(&doorbell_mutex);
+       inx = find_first_zero_bit(doorbell_available_index,
+                                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+
+       __set_bit(inx, doorbell_available_index);
+       mutex_unlock(&doorbell_mutex);
+
+       if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+               return NULL;
+
+       /*
+        * Calculate the kernel doorbell offset using the "faked" kernel
+        * pasid that is allocated for kernel queues only.
+        */
+       *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
+                                                       sizeof(u32)) + inx;
+
+       pr_debug("kfd: get kernel queue doorbell\n"
+                        "     doorbell offset   == 0x%08d\n"
+                        "     kernel address    == 0x%08lX\n",
+               *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
+
+       return kfd->doorbell_kernel_ptr + inx;
+}
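+
+/*
+ * Example of the offset math above (assuming one 4K doorbell page per
+ * process and 4-byte doorbells): doorbell_process_allocation() /
+ * sizeof(u32) == 1024, so with KERNEL_DOORBELL_PASID == 1 the doorbell
+ * for slot inx lands at dword offset 1024 + inx, inside the page
+ * reserved for the kernel pasid.
+ */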
+
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+{
+       unsigned int inx;
+
+       BUG_ON(!kfd || !db_addr);
+
+       inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+
+       mutex_lock(&doorbell_mutex);
+       __clear_bit(inx, doorbell_available_index);
+       mutex_unlock(&doorbell_mutex);
+}
+
+inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
+{
+       if (db) {
+               writel(value, db);
+               pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+       }
+}
+
+/*
+ * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
+ * to doorbells within the process's doorbell page.
+ */
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+                                       struct kfd_process *process,
+                                       unsigned int queue_id)
+{
+       /*
+        * doorbell_id_offset accounts for doorbells taken by KGD.
+        * pasid * doorbell_process_allocation/sizeof(u32) adjusts
+        * to the process's doorbells
+        */
+       return kfd->doorbell_id_offset +
+               process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+               queue_id;
+}
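+
+/*
+ * Illustrative numbers (assuming one 4K doorbell page per process and
+ * doorbell_id_offset == 0): a process with pasid 3 and queue_id 2 maps
+ * to doorbell index 3 * 1024 + 2 == 3074, i.e. dword 2 of that
+ * process's doorbell page.
+ */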
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
+{
+       uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
+                               kfd->shared_resources.doorbell_start_offset) /
+                                       doorbell_process_allocation() + 1;
+
+       return num_of_elems;
+}
+
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+                                       struct kfd_process *process)
+{
+       return dev->doorbell_base +
+               process->pasid * doorbell_process_allocation();
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
new file mode 100644 (file)
index 0000000..66df4da
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+
+/*
+ * The primary memory I/O features being added for revisions of gfxip
+ * beyond 7.0 (Kaveri) are:
+ *
+ * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
+ *
+ * “Flat” shader memory access – These are new shader vector memory
+ * operations that do not reference a T#/V# so a “pointer” is what is
+ * sourced from the vector gprs for direct access to memory.
+ * This pointer space has the Shared(LDS) and Private(Scratch) memory
+ * mapped into this pointer space as apertures.
+ * The hardware then determines how to direct the memory request
+ * based on what apertures the request falls in.
+ *
+ * Unaligned support and alignment check
+ *
+ *
+ * System Unified Address - SUA
+ *
+ * The standard usage for GPU virtual addresses is that they are mapped by
+ * a set of page tables we call GPUVM and these page tables are managed by
+ * a combination of vidMM/driver software components.  The current virtual
+ * address (VA) range for GPUVM is 40b.
+ *
+ * As of gfxip7.1 and beyond we’re adding the ability for compute memory
+ * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
+ * the same page tables used by host x86 processors and that are managed by
+ * the operating system. This is via a technique and hardware called ATC/IOMMU.
+ * The GPU has the capability of accessing both the GPUVM and ATC address
+ * spaces for a given VMID (process) simultaneously and we call this feature
+ * system unified address (SUA).
+ *
+ * There are three fundamental address modes of operation for a given VMID
+ * (process) on the GPU:
+ *
+ *     HSA64 – 64b pointers and the default address space is ATC
+ *     HSA32 – 32b pointers and the default address space is ATC
+ *     GPUVM – 64b pointers and the default address space is GPUVM (driver
+ *             model mode)
+ *
+ *
+ * HSA64 - ATC/IOMMU 64b
+ *
+ * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
+ * by the CPU so an AMD CPU can only access the high area
+ * (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
+ * so the actual VA carried to translation is 48b.  There is a “hole” in
+ * the middle of the 64b VA space.
+ *
+ * The GPU not only has access to all of the CPU accessible address space via
+ * ATC/IOMMU, but it also has access to the GPUVM address space.  The “system
+ * unified address” feature (SUA) is the mapping of GPUVM and ATC address
+ * spaces into a unified pointer space.  The method we take for 64b mode is
+ * to map the full 40b GPUVM address space into the hole of the 64b address
+ * space.
+ *
+ * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
+ * direct requests to be translated via GPUVM page tables instead of the
+ * IOMMU path.
+ *
+ *
+ * 64b to 49b Address conversion
+ *
+ * Note that there are still significant portions of unused regions (holes)
+ * in the 64b address space even for the GPU.  There are several places in
+ * the pipeline (sw and hw), we wish to compress the 64b virtual address
+ * to a 49b address.  This 49b address is constituted of an “ATC” bit
+ * plus a 48b virtual address.  This 49b address is what is passed to the
+ * translation hardware.  ATC==0 means the 48b address is a GPUVM address
+ * (max of 2^40 – 1) intended to be translated via GPUVM page tables.
+ * ATC==1 means the 48b address is intended to be translated via IOMMU
+ * page tables.
+ *
+ * A 64b pointer is compared to the apertures that are defined (Base/Limit); in
+ * this case the GPUVM aperture is defined, and if a pointer falls in this
+ * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
+ * as part of the 64b to 49b conversion.
+ *
+ * Where this 64b to 49b conversion is done is a function of the usage.
+ * Most GPU memory access is via memory objects where the driver builds
+ * a descriptor which consists of a base address and a memory access by
+ * the GPU usually consists of some kind of an offset or Cartesian coordinate
+ * that references this memory descriptor.  This is the case for shader
+ * instructions that reference the T# or V# constants, or for specified
+ * locations of assets (ex. the shader program location).  In these cases
+ * the driver is what handles the 64b to 49b conversion and the base
+ * address in the descriptor (ex. V# or T# or shader program location)
+ * is defined as a 48b address w/ an ATC bit.  For this usage a given
+ * memory object cannot straddle multiple apertures in the 64b address
+ * space. For example a shader program cannot jump in/out between ATC
+ * and GPUVM space.
+ *
+ * In some cases we wish to pass a 64b pointer to the GPU hardware and
+ * the GPU hw does the 64b to 49b conversion before passing memory
+ * requests to the cache/memory system.  This is the case for the
+ * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
+ * in scalar and vector GPRs respectively.
+ *
+ * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
+ * hardware sends a 48b address along w/ an ATC bit, to the memory controller
+ * on the memory request interfaces.
+ *
+ *     <client>_MC_rdreq_atc   // read request ATC bit
+ *
+ *             0 : <client>_MC_rdreq_addr is a GPUVM VA
+ *
+ *             1 : <client>_MC_rdreq_addr is a ATC VA
+ *
+ *
+ * “Spare” aperture (APE1)
+ *
+ * We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
+ * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
+ * config tables for setting cache policies. The “spare” (APE1) aperture is
+ * motivated by getting a different Mtype from the default.
+ * The default aperture isn’t an actual base/limit aperture; it is just the
+ * address space that doesn’t hit any defined base/limit apertures.
+ * Taken together, the apertures above form the complete picture of the
+ * gfxip7.x SUA address map. The APE1 aperture can be placed either
+ * below or above the hole (it cannot be inside the hole).
+ *
+ *
+ * General Aperture definitions and rules
+ *
+ * An aperture register definition consists of a Base, Limit, Mtype, and
+ * usually an ATC bit indicating which translation tables that aperture uses.
+ * In all cases (for SUA and DUA apertures discussed later), aperture base
+ * and limit definitions are 64KB aligned.
+ *
+ *     <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
+ *
+ *     <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
+ *
+ * The base and limit are considered inclusive to an aperture so being
+ * inside an aperture means (address >= Base) AND (address <= Limit).
+ *
+ * In no case is a payload that straddles multiple apertures expected to work.
+ * For example a load_dword_x4 that starts in one aperture and ends in another,
+ * does not work.  For the vector FLAT_* ops we have detection capability in
+ * the shader for reporting a “memory violation” back to the
+ * SQ block for use in traps.
+ * A memory violation results when an op falls into the hole,
+ * or a payload straddles multiple apertures.  The S_LOAD instruction
+ * does not have this detection.
+ *
+ * Apertures cannot overlap.
+ *
+ *
+ *
+ * HSA32 - ATC/IOMMU 32b
+ *
+ * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
+ * instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
+ * will not fit so there is only partial visibility to the GPUVM
+ * space (defined by the aperture) for S_LOAD and FLAT_* ops.
+ * There is no spare (APE1) aperture for HSA32 mode.
+ *
+ *
+ * GPUVM 64b mode (driver model)
+ *
+ * This mode is related to HSA64 in that the difference really is that
+ * the default aperture is GPUVM (ATC==0) and not ATC space.
+ * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
+ * SUA GPUVM mode, but does not support HSA32/HSA64.
+ *
+ *
+ * Device Unified Address - DUA
+ *
+ * Device unified address (DUA) is the name of the feature that maps the
+ * Shared(LDS) memory and Private(Scratch) memory into the overall address
+ * space for use by the new FLAT_* vector memory ops.  The Shared and
+ * Private memories are mapped as apertures into the address space,
+ * and the hardware detects when a FLAT_* memory request is to be redirected
+ * to the LDS or Scratch memory when it falls into one of these apertures.
+ * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
+ * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
+ * the Shared/Private apertures are always placed in a limited selection of
+ * options in the hole of the 64b address space. For HSA32 mode, the
+ * Shared/Private apertures can be placed anywhere in the 32b space
+ * except at 0.
+ *
+ *
+ * HSA64 Apertures for FLAT_* vector ops
+ *
+ * For HSA64 SUA mode, the Shared and Private apertures are always placed
+ * in the hole w/ a limited selection of possible locations. The requests
+ * that fall in the private aperture are expanded as a function of the
+ * work-item id (tid) and redirected to the location of the
+ * “hidden private memory”. The hidden private can be placed in either GPUVM
+ * or ATC space. The addresses that fall in the shared aperture are
+ * re-directed to the on-chip LDS memory hardware.
+ *
+ *
+ * HSA32 Apertures for FLAT_* vector ops
+ *
+ * In HSA32 mode, the Private and Shared apertures can be placed anywhere
+ * in the 32b space except at 0 (a Private or Shared Base of zero disables
+ * that aperture). If the base address of an aperture is non-zero
+ * (i.e. the aperture exists), its size is always 64KB.
+ *
+ *
+ * GPUVM Apertures for FLAT_* vector ops
+ *
+ * In GPUVM mode, the Shared/Private apertures are specified identically
+ * to HSA64 mode where they are always in the hole at a limited selection
+ * of locations.
+ *
+ *
+ * Aperture Definitions for SUA and DUA
+ *
+ * The interpretation of the aperture register definitions for a given
+ * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
+ * GPUVM64 discussed in previous sections. The mode is first decoded, and
+ * then the remaining register decode is a function of the mode.
+ *
+ *
+ * SUA Mode Decode
+ *
+ * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
+ * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
+ * the SH_MEM_CONFIG:PTR32 bits.
+ *
+ * COMPUTE_DISPATCH_INITIATOR:DATA_ATC    SH_MEM_CONFIG:PTR32        Mode
+ *
+ * 1                                              0                  HSA64
+ *
+ * 1                                              1                  HSA32
+ *
+ * 0                                              X                 GPUVM64
+ *
+ * In general the hardware will ignore the PTR32 bit and treat it
+ * as “0” whenever DATA_ATC == “0”, but software should still set
+ * PTR32=0 when DATA_ATC=0.
+ *
+ * The DATA_ATC bit is only set for compute dispatches.
+ * All “Draw” dispatches are hardcoded to GPUVM64 mode
+ * for FLAT_* / S_LOAD operations.
+ */
+
+#define MAKE_GPUVM_APP_BASE(gpu_num) \
+       (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
+
+#define MAKE_GPUVM_APP_LIMIT(base) \
+       (((uint64_t)(base) & \
+               0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
+
+#define MAKE_SCRATCH_APP_BASE(gpu_num) \
+       (((uint64_t)(gpu_num) << 61) + 0x100000000L)
+
+#define MAKE_SCRATCH_APP_LIMIT(base) \
+       (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+#define MAKE_LDS_APP_BASE(gpu_num) \
+       (((uint64_t)(gpu_num) << 61) + 0x0)
+#define MAKE_LDS_APP_LIMIT(base) \
+       (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+int kfd_init_apertures(struct kfd_process *process)
+{
+       uint8_t id  = 0;
+       struct kfd_dev *dev;
+       struct kfd_process_device *pdd;
+
+       mutex_lock(&process->mutex);
+
+       /* Iterate over all devices */
+       while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
+               id < NUM_OF_SUPPORTED_GPUS) {
+
+               pdd = kfd_get_process_device_data(dev, process, 1);
+
+               /*
+                * For a 64-bit process the aperture is statically reserved
+                * in the x86_64 non-canonical process address space;
+                * amdkfd doesn't currently support apertures for 32-bit
+                * processes.
+                */
+               if (process->is_32bit_user_mode) {
+                       pdd->lds_base = pdd->lds_limit = 0;
+                       pdd->gpuvm_base = pdd->gpuvm_limit = 0;
+                       pdd->scratch_base = pdd->scratch_limit = 0;
+               } else {
+                       /*
+                        * node id can't be 0 - the three MSBs of the
+                        * aperture shouldn't be 0
+                        */
+                       pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
+
+                       pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+                       pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
+
+                       pdd->gpuvm_limit =
+                                       MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
+
+                       pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
+
+                       pdd->scratch_limit =
+                               MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+               }
+
+               dev_dbg(kfd_device, "node id %u\n", id);
+               dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
+               dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
+               dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
+               dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
+               dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+               dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
+               dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
+
+               id++;
+       }
+
+       mutex_unlock(&process->mutex);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
new file mode 100644 (file)
index 0000000..5b99909
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * KFD Interrupts.
+ *
+ * AMD GPUs deliver interrupts by pushing an interrupt description onto the
+ * interrupt ring and then sending an interrupt. KGD receives the interrupt
+ * in ISR and sends us a pointer to each new entry on the interrupt ring.
+ *
+ * We generally can't process interrupt-signaled events from ISR, so we call
+ * out to each interrupt client module (currently only the scheduler) to ask if
+ * each interrupt is interesting. If they return true, then it requires further
+ * processing so we copy it to an internal interrupt ring and call each
+ * interrupt client again from a work-queue.
+ *
+ * There's no acknowledgment for the interrupts we use. The hardware simply
+ * queues a new interrupt each time without waiting.
+ *
+ * The fixed-size internal queue means that it's possible for us to lose
+ * interrupts because we have no back-pressure to the hardware.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_INTERRUPT_RING_SIZE 256
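+
+/*
+ * The ring below keeps one entry slot empty to distinguish "full" from
+ * "empty": a 256-entry ring can hold at most 255 pending entries, and
+ * enqueue_ih_ring_entry() reports overflow when the write pointer is
+ * one entry behind the read pointer.
+ */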
+
+static void interrupt_wq(struct work_struct *);
+
+int kfd_interrupt_init(struct kfd_dev *kfd)
+{
+       void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+                                       kfd->device_info->ih_ring_entry_size,
+                                       GFP_KERNEL);
+       if (!interrupt_ring)
+               return -ENOMEM;
+
+       kfd->interrupt_ring = interrupt_ring;
+       kfd->interrupt_ring_size =
+               KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
+       atomic_set(&kfd->interrupt_ring_wptr, 0);
+       atomic_set(&kfd->interrupt_ring_rptr, 0);
+
+       spin_lock_init(&kfd->interrupt_lock);
+
+       INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+
+       kfd->interrupts_active = true;
+
+       /*
+        * After this function returns, the interrupt will be enabled. This
+        * barrier ensures that the interrupt running on a different processor
+        * sees all the above writes.
+        */
+       smp_wmb();
+
+       return 0;
+}
+
+void kfd_interrupt_exit(struct kfd_dev *kfd)
+{
+       /*
+        * Stop the interrupt handler from writing to the ring and scheduling
+        * workqueue items. The spinlock ensures that any interrupt running
+        * after we have unlocked sees interrupts_active = false.
+        */
+       unsigned long flags;
+
+       spin_lock_irqsave(&kfd->interrupt_lock, flags);
+       kfd->interrupts_active = false;
+       spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+
+       /*
+        * flush_scheduled_work() ensures that there are no outstanding
+        * work-queue items that will access interrupt_ring. New work items
+        * can't be created because we stopped interrupt handling above.
+        */
+       flush_scheduled_work();
+
+       kfree(kfd->interrupt_ring);
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with dequeue_ih_ring_entry.
+ */
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+       unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+       unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+
+       if ((rptr - wptr) % kfd->interrupt_ring_size ==
+                                       kfd->device_info->ih_ring_entry_size) {
+               /* This is very bad; the system is likely to hang. */
+               dev_err_ratelimited(kfd_chardev(),
+                       "Interrupt ring overflow, dropping interrupt.\n");
+               return false;
+       }
+
+       memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
+                       kfd->device_info->ih_ring_entry_size);
+
+       wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
+                       kfd->interrupt_ring_size;
+       smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
+       atomic_set(&kfd->interrupt_ring_wptr, wptr);
+
+       return true;
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with enqueue_ih_ring_entry.
+ */
+static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+{
+       /*
+        * Assume that wait queues have an implicit barrier, i.e. anything that
+        * happened in the ISR before it queued work is visible.
+        */
+
+       unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+       unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+
+       if (rptr == wptr)
+               return false;
+
+       memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
+                       kfd->device_info->ih_ring_entry_size);
+
+       rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
+                       kfd->interrupt_ring_size;
+
+       /*
+        * Ensure the rptr write update is not visible until
+        * memcpy has finished reading.
+        */
+       smp_mb();
+       atomic_set(&kfd->interrupt_ring_rptr, rptr);
+
+       return true;
+}
+
+static void interrupt_wq(struct work_struct *work)
+{
+       struct kfd_dev *dev = container_of(work, struct kfd_dev,
+                                               interrupt_work);
+
+       uint32_t ih_ring_entry[DIV_ROUND_UP(
+                               dev->device_info->ih_ring_entry_size,
+                               sizeof(uint32_t))];
+
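+       /* For now simply drain the ring; dequeued entries are dropped. */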
+       while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+               ;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
new file mode 100644 (file)
index 0000000..9350714
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
+
+static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+               enum kfd_queue_type type, unsigned int queue_size)
+{
+       struct queue_properties prop;
+       int retval;
+       union PM4_MES_TYPE_3_HEADER nop;
+
+       BUG_ON(!kq || !dev);
+       BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+
+       pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+                       __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+
+       nop.opcode = IT_NOP;
+       nop.type = PM4_TYPE_3;
+       nop.u32all |= PM4_COUNT_ZERO;
+
+       kq->dev = dev;
+       kq->nop_packet = nop.u32all;
+       switch (type) {
+       case KFD_QUEUE_TYPE_DIQ:
+       case KFD_QUEUE_TYPE_HIQ:
+               kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+                                               KFD_MQD_TYPE_CIK_HIQ);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       if (kq->mqd == NULL)
+               return false;
+
+       prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+
+       if (prop.doorbell_ptr == NULL)
+               goto err_get_kernel_doorbell;
+
+       retval = kfd2kgd->allocate_mem(dev->kgd,
+                                       queue_size,
+                                       PAGE_SIZE,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &kq->pq);
+
+       if (retval != 0)
+               goto err_pq_allocate_vidmem;
+
+       kq->pq_kernel_addr = kq->pq->cpu_ptr;
+       kq->pq_gpu_addr = kq->pq->gpu_addr;
+
+       retval = kfd2kgd->allocate_mem(dev->kgd,
+                                       sizeof(*kq->rptr_kernel),
+                                       32,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &kq->rptr_mem);
+
+       if (retval != 0)
+               goto err_rptr_allocate_vidmem;
+
+       kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
+       kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
+
+       retval = kfd2kgd->allocate_mem(dev->kgd,
+                                       sizeof(*kq->wptr_kernel),
+                                       32,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &kq->wptr_mem);
+
+       if (retval != 0)
+               goto err_wptr_allocate_vidmem;
+
+       kq->wptr_kernel = kq->wptr_mem->cpu_ptr;
+       kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr;
+
+       memset(kq->pq_kernel_addr, 0, queue_size);
+       memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
+       memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
+
+       prop.queue_size = queue_size;
+       prop.is_interop = false;
+       prop.priority = 1;
+       prop.queue_percent = 100;
+       prop.type = type;
+       prop.vmid = 0;
+       prop.queue_address = kq->pq_gpu_addr;
+       prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
+       prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
+
+       if (init_queue(&kq->queue, prop) != 0)
+               goto err_init_queue;
+
+       kq->queue->device = dev;
+       kq->queue->process = kfd_get_process(current);
+
+       retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+                                       &kq->queue->mqd_mem_obj,
+                                       &kq->queue->gart_mqd_addr,
+                                       &kq->queue->properties);
+       if (retval != 0)
+               goto err_init_mqd;
+
+       /* assign HIQ to HQD */
+       if (type == KFD_QUEUE_TYPE_HIQ) {
+               pr_debug("assigning hiq to hqd\n");
+               kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+               kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+               kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
+                                       kq->queue->queue, NULL);
+       } else {
+               /* allocate fence for DIQ */
+
+               retval = kfd2kgd->allocate_mem(dev->kgd,
+                                       sizeof(uint32_t),
+                                       32,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &kq->fence_mem_obj);
+
+               if (retval != 0)
+                       goto err_alloc_fence;
+
+               kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr;
+               kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr;
+       }
+
+       print_queue(kq->queue);
+
+       return true;
+err_alloc_fence:
+err_init_mqd:
+       uninit_queue(kq->queue);
+err_init_queue:
+       kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+err_wptr_allocate_vidmem:
+       kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+err_rptr_allocate_vidmem:
+       kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+err_pq_allocate_vidmem:
+       pr_err("kfd: error init pq\n");
+       kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
+err_get_kernel_doorbell:
+       pr_err("kfd: error init doorbell");
+       return false;
+}
+
+static void uninitialize(struct kernel_queue *kq)
+{
+       BUG_ON(!kq);
+
+       if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+               kq->mqd->destroy_mqd(kq->mqd,
+                                       NULL,
+                                       false,
+                                       QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+                                       kq->queue->pipe,
+                                       kq->queue->queue);
+
+       kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+       kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+       kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+       kfd_release_kernel_doorbell(kq->dev,
+                                       kq->queue->properties.doorbell_ptr);
+       uninit_queue(kq->queue);
+}
+
+static int acquire_packet_buffer(struct kernel_queue *kq,
+               size_t packet_size_in_dwords, unsigned int **buffer_ptr)
+{
+       size_t available_size;
+       size_t queue_size_dwords;
+       uint32_t wptr, rptr;
+       unsigned int *queue_address;
+
+       BUG_ON(!kq || !buffer_ptr);
+
+       rptr = *kq->rptr_kernel;
+       wptr = *kq->wptr_kernel;
+       queue_address = (unsigned int *)kq->pq_kernel_addr;
+       queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
+
+       pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+                       __func__, rptr, wptr, queue_address);
+
+       available_size = (rptr - 1 - wptr + queue_size_dwords) %
+                                                       queue_size_dwords;
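+
+       /*
+        * Example (illustrative): with a 256-dword queue, rptr == 10 and
+        * wptr == 250, available_size == (10 - 1 - 250 + 256) % 256 == 15.
+        * One dword is always kept free so wptr never catches up to rptr.
+        */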
+
+       if (packet_size_in_dwords >= queue_size_dwords ||
+                       packet_size_in_dwords >= available_size) {
+               /*
+                * make sure calling functions know
+                * acquire_packet_buffer() failed
+                */
+               *buffer_ptr = NULL;
+               return -ENOMEM;
+       }
+
+       if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+               while (wptr > 0) {
+                       queue_address[wptr] = kq->nop_packet;
+                       wptr = (wptr + 1) % queue_size_dwords;
+               }
+       }
+
+       *buffer_ptr = &queue_address[wptr];
+       kq->pending_wptr = wptr + packet_size_in_dwords;
+
+       return 0;
+}
+
+static void submit_packet(struct kernel_queue *kq)
+{
+#ifdef DEBUG
+       int i;
+#endif
+
+       BUG_ON(!kq);
+
+#ifdef DEBUG
+       for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
+               pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
+               if (i % 15 == 0)
+                       pr_debug("\n");
+       }
+       pr_debug("\n");
+#endif
+
+       *kq->wptr_kernel = kq->pending_wptr;
+       write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+                               kq->pending_wptr);
+}
+
+static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
+{
+       unsigned long org_timeout_ms;
+
+       BUG_ON(!kq);
+
+       org_timeout_ms = timeout_ms;
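+       /*
+        * Convert the relative timeout into an absolute deadline in
+        * milliseconds since boot; jiffies * 1000 / HZ approximates the
+        * current time in ms.
+        */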
+       timeout_ms += jiffies * 1000 / HZ;
+       while (*kq->wptr_kernel != *kq->rptr_kernel) {
+               if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
+                       pr_err("kfd: kernel_queue %s timeout expired %lu\n",
+                               __func__, org_timeout_ms);
+                       pr_err("kfd: wptr: %d rptr: %d\n",
+                               *kq->wptr_kernel, *kq->rptr_kernel);
+                       return -ETIME;
+               }
+               schedule();
+       }
+
+       return 0;
+}
+
+static void rollback_packet(struct kernel_queue *kq)
+{
+       BUG_ON(!kq);
+       kq->pending_wptr = *kq->queue->properties.write_ptr;
+}
+
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+                                       enum kfd_queue_type type)
+{
+       struct kernel_queue *kq;
+
+       BUG_ON(!dev);
+
+       kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+       if (!kq)
+               return NULL;
+
+       kq->initialize = initialize;
+       kq->uninitialize = uninitialize;
+       kq->acquire_packet_buffer = acquire_packet_buffer;
+       kq->submit_packet = submit_packet;
+       kq->sync_with_hw = sync_with_hw;
+       kq->rollback_packet = rollback_packet;
+
+       if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+               pr_err("kfd: failed to init kernel queue\n");
+               kfree(kq);
+               return NULL;
+       }
+       return kq;
+}
+
+void kernel_queue_uninit(struct kernel_queue *kq)
+{
+       BUG_ON(!kq);
+
+       kq->uninitialize(kq);
+       kfree(kq);
+}
+
+static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+{
+       struct kernel_queue *kq;
+       uint32_t *buffer, i;
+       int retval;
+
+       BUG_ON(!dev);
+
+       pr_debug("kfd: starting kernel queue test\n");
+
+       kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
+       BUG_ON(!kq);
+
+       retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+       BUG_ON(retval != 0);
+       for (i = 0; i < 5; i++)
+               buffer[i] = kq->nop_packet;
+       kq->submit_packet(kq);
+       kq->sync_with_hw(kq, 1000);
+
+       pr_debug("kfd: ending kernel queue test\n");
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
new file mode 100644 (file)
index 0000000..dcd2bdb
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_KERNEL_QUEUE_H_
+#define KFD_KERNEL_QUEUE_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+struct kernel_queue {
+       /* interface */
+       bool    (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
+                       enum kfd_queue_type type, unsigned int queue_size);
+       void    (*uninitialize)(struct kernel_queue *kq);
+       int     (*acquire_packet_buffer)(struct kernel_queue *kq,
+                                       size_t packet_size_in_dwords,
+                                       unsigned int **buffer_ptr);
+
+       void    (*submit_packet)(struct kernel_queue *kq);
+       int     (*sync_with_hw)(struct kernel_queue *kq,
+                               unsigned long timeout_ms);
+       void    (*rollback_packet)(struct kernel_queue *kq);
+
+       /* data */
+       struct kfd_dev          *dev;
+       struct mqd_manager      *mqd;
+       struct queue            *queue;
+       uint32_t                pending_wptr;
+       unsigned int            nop_packet;
+
+       struct kfd_mem_obj      *rptr_mem;
+       uint32_t                *rptr_kernel;
+       uint64_t                rptr_gpu_addr;
+       struct kfd_mem_obj      *wptr_mem;
+       uint32_t                *wptr_kernel;
+       uint64_t                wptr_gpu_addr;
+       struct kfd_mem_obj      *pq;
+       uint64_t                pq_gpu_addr;
+       uint32_t                *pq_kernel_addr;
+
+       struct kfd_mem_obj      *fence_mem_obj;
+       uint64_t                fence_gpu_addr;
+       void                    *fence_kernel_address;
+
+       struct list_head        list;
+};
+
+#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
new file mode 100644 (file)
index 0000000..95d5af1
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_DRIVER_AUTHOR      "AMD Inc. and others"
+
+#define KFD_DRIVER_DESC                "Standalone HSA driver for AMD's GPUs"
+#define KFD_DRIVER_DATE                "20141113"
+#define KFD_DRIVER_MAJOR       0
+#define KFD_DRIVER_MINOR       7
+#define KFD_DRIVER_PATCHLEVEL  0
+
+const struct kfd2kgd_calls *kfd2kgd;
+static const struct kgd2kfd_calls kgd2kfd = {
+       .exit           = kgd2kfd_exit,
+       .probe          = kgd2kfd_probe,
+       .device_init    = kgd2kfd_device_init,
+       .device_exit    = kgd2kfd_device_exit,
+       .interrupt      = kgd2kfd_interrupt,
+       .suspend        = kgd2kfd_suspend,
+       .resume         = kgd2kfd_resume,
+};
+
+int sched_policy = KFD_SCHED_POLICY_HWS;
+module_param(sched_policy, int, 0444);
+MODULE_PARM_DESC(sched_policy,
+       "Kernel cmdline parameter that defines the amdkfd scheduling policy");
+
+int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
+module_param(max_num_of_processes, int, 0444);
+MODULE_PARM_DESC(max_num_of_processes,
+       "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
+
+int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
+module_param(max_num_of_queues_per_process, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_process,
+       "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+
+bool kgd2kfd_init(unsigned interface_version,
+                 const struct kfd2kgd_calls *f2g,
+                 const struct kgd2kfd_calls **g2f)
+{
+       /*
+        * Only one interface version is supported,
+        * no kfd/kgd version skew allowed.
+        */
+       if (interface_version != KFD_INTERFACE_VERSION)
+               return false;
+
+       /* Protection against multiple amd kgd loads */
+       if (kfd2kgd)
+               return true;
+
+       kfd2kgd = f2g;
+       *g2f = &kgd2kfd;
+
+       return true;
+}
+EXPORT_SYMBOL(kgd2kfd_init);
+
+void kgd2kfd_exit(void)
+{
+}
+
+static int __init kfd_module_init(void)
+{
+       int err;
+
+       kfd2kgd = NULL;
+
+       /* Verify module parameters */
+       if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
+               (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
+               pr_err("kfd: sched_policy has invalid value\n");
+               return -1;
+       }
+
+       /* Verify module parameters */
+       if ((max_num_of_processes < 0) ||
+               (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
+               pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
+               return -1;
+       }
+
+       if ((max_num_of_queues_per_process < 0) ||
+               (max_num_of_queues_per_process >
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
+               pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+               return -1;
+       }
+
+       err = kfd_pasid_init();
+       if (err < 0)
+               goto err_pasid;
+
+       err = kfd_chardev_init();
+       if (err < 0)
+               goto err_ioctl;
+
+       err = kfd_topology_init();
+       if (err < 0)
+               goto err_topology;
+
+       kfd_process_create_wq();
+
+       dev_info(kfd_device, "Initialized module\n");
+
+       return 0;
+
+err_topology:
+       kfd_chardev_exit();
+err_ioctl:
+       kfd_pasid_exit();
+err_pasid:
+       return err;
+}
+
+static void __exit kfd_module_exit(void)
+{
+       kfd_process_destroy_wq();
+       kfd_topology_shutdown();
+       kfd_chardev_exit();
+       kfd_pasid_exit();
+       dev_info(kfd_device, "Removed module\n");
+}
+
+module_init(kfd_module_init);
+module_exit(kfd_module_exit);
+
+MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(KFD_DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "."
+              __stringify(KFD_DRIVER_MINOR) "."
+              __stringify(KFD_DRIVER_PATCHLEVEL));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
new file mode 100644 (file)
index 0000000..adc3147
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "../../radeon/cik_reg.h"
+
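+/*
+ * Note: the @ms argument is compared directly against jiffies, so
+ * callers are expected to pass an absolute jiffies deadline (e.g.
+ * jiffies + msecs_to_jiffies(ms)) rather than a plain millisecond
+ * count.
+ */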
+inline void busy_wait(unsigned long ms)
+{
+       while (time_before(jiffies, ms))
+               cpu_relax();
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+       return (struct cik_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+               struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+               struct queue_properties *q)
+{
+       uint64_t addr;
+       struct cik_mqd *m;
+       int retval;
+
+       BUG_ON(!mm || !q || !mqd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+                                       sizeof(struct cik_mqd),
+                                       256,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) mqd_mem_obj);
+
+       if (retval != 0)
+               return -ENOMEM;
+
+       m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+       addr = (*mqd_mem_obj)->gpu_addr;
+
+       memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+       m->header = 0xC0310800;
+       m->compute_pipelinestat_enable = 1;
+       m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+       /*
+        * Make sure to use the last queue state saved on mqd when the cp
+        * reassigns the queue, so when queue is switched on/off (e.g over
+        * subscription or quantum timeout) the context will be consistent
+        */
+       m->cp_hqd_persistent_state =
+                               DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
+
+       m->cp_mqd_control             = MQD_CONTROL_PRIV_STATE_EN;
+       m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+       m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+       /* Although WinKFD writes this, I suspect it should not be necessary */
+       m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
+
+       m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+                               QUANTUM_DURATION(10);
+
+       /*
+        * Pipe Priority
+        * Identifies the pipe relative priority when this queue is connected
+        * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+        * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+        * 0 = CS_LOW (typically below GFX)
+        * 1 = CS_MEDIUM (typically between HP3D and GFX)
+        * 2 = CS_HIGH (typically above HP3D)
+        */
+       m->cp_hqd_pipe_priority = 1;
+       m->cp_hqd_queue_priority = 15;
+
+       *mqd = m;
+       if (gart_addr != NULL)
+               *gart_addr = addr;
+       retval = mm->update_mqd(mm, m, q);
+
+       return retval;
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+                       struct kfd_mem_obj *mqd_mem_obj)
+{
+       BUG_ON(!mm || !mqd);
+       kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr)
+{
+       return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       struct cik_mqd *m;
+
+       BUG_ON(!mm || !q || !mqd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       m = get_mqd(mqd);
+       m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+                               DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
+
+       /*
+        * The queue-size field encodes log2(queue size in dwords) - 1.
+        * ffs() is 1-based, so subtract one to get log2 and another
+        * one for the field encoding.
+        */
+       m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+                                                               - 1 - 1;
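+       /*
+        * Example: a 4096-byte queue is 1024 dwords; ffs(1024) == 11,
+        * so the encoded value is 11 - 1 - 1 == 9 == log2(1024) - 1.
+        */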
+       m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+       m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+       m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+       m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+       m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+                                       DOORBELL_OFFSET(q->doorbell_off);
+
+       m->cp_hqd_vmid = q->vmid;
+
+       if (q->format == KFD_QUEUE_FORMAT_AQL) {
+               m->cp_hqd_iq_rptr = AQL_ENABLE;
+               m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+       }
+
+       m->cp_hqd_active = 0;
+       q->is_active = false;
+       if (q->queue_size > 0 &&
+                       q->queue_address != 0 &&
+                       q->queue_percent > 0) {
+               m->cp_hqd_active = 1;
+               q->is_active = true;
+       }
+
+       return 0;
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+                       enum kfd_preempt_type type,
+                       unsigned int timeout, uint32_t pipe_id,
+                       uint32_t queue_id)
+{
+       return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+                                       pipe_id, queue_id);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+                       uint64_t queue_address, uint32_t pipe_id,
+                       uint32_t queue_id)
+{
+       return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+                                       pipe_id, queue_id);
+}
+
+/*
+ * HIQ MQD Implementation, concrete implementation for HIQ MQD implementation.
+ * The HIQ queue in Kaveri is using the same MQD structure as all the user mode
+ * queues but with different initial values.
+ */
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+               struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+               struct queue_properties *q)
+{
+       uint64_t addr;
+       struct cik_mqd *m;
+       int retval;
+
+       BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+                                       sizeof(struct cik_mqd),
+                                       256,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) mqd_mem_obj);
+
+       if (retval != 0)
+               return -ENOMEM;
+
+       m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+       addr = (*mqd_mem_obj)->gpu_addr;
+
+       memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+       m->header = 0xC0310800;
+       m->compute_pipelinestat_enable = 1;
+       m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+       m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+                                       PRELOAD_REQ;
+       m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+                               QUANTUM_DURATION(10);
+
+       m->cp_mqd_control             = MQD_CONTROL_PRIV_STATE_EN;
+       m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+       m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+       m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+
+       /*
+        * Pipe Priority
+        * Identifies the pipe relative priority when this queue is connected
+        * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+        * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+        * 0 = CS_LOW (typically below GFX)
+        * 1 = CS_MEDIUM (typically between HP3D and GFX)
+        * 2 = CS_HIGH (typically above HP3D)
+        */
+       m->cp_hqd_pipe_priority = 1;
+       m->cp_hqd_queue_priority = 15;
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+       retval = mm->update_mqd(mm, m, q);
+
+       return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+                               struct queue_properties *q)
+{
+       struct cik_mqd *m;
+
+       BUG_ON(!mm || !q || !mqd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       m = get_mqd(mqd);
+       m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+                               DEFAULT_MIN_AVAIL_SIZE |
+                               PRIV_STATE |
+                               KMD_QUEUE;
+
+       /*
+        * Calculate the queue size field: log2 of the queue size in dwords,
+        * minus 1 (see the worked example in update_mqd()).
+        */
+       m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+                                                               - 1 - 1;
+       m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+       m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+       m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+       m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+       m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+                                       DOORBELL_OFFSET(q->doorbell_off);
+
+       m->cp_hqd_vmid = q->vmid;
+
+       m->cp_hqd_active = 0;
+       q->is_active = false;
+       if (q->queue_size > 0 &&
+                       q->queue_address != 0 &&
+                       q->queue_percent > 0) {
+               m->cp_hqd_active = 1;
+               q->is_active = true;
+       }
+
+       return 0;
+}
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+                                       struct kfd_dev *dev)
+{
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dev);
+       BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+       if (!mqd)
+               return NULL;
+
+       mqd->dev = dev;
+
+       switch (type) {
+       case KFD_MQD_TYPE_CIK_CP:
+       case KFD_MQD_TYPE_CIK_COMPUTE:
+               mqd->init_mqd = init_mqd;
+               mqd->uninit_mqd = uninit_mqd;
+               mqd->load_mqd = load_mqd;
+               mqd->update_mqd = update_mqd;
+               mqd->destroy_mqd = destroy_mqd;
+               mqd->is_occupied = is_occupied;
+               break;
+       case KFD_MQD_TYPE_CIK_HIQ:
+               mqd->init_mqd = init_mqd_hiq;
+               mqd->uninit_mqd = uninit_mqd;
+               mqd->load_mqd = load_mqd;
+               mqd->update_mqd = update_mqd_hiq;
+               mqd->destroy_mqd = destroy_mqd;
+               mqd->is_occupied = is_occupied;
+               break;
+       default:
+               kfree(mqd);
+               return NULL;
+       }
+
+       return mqd;
+}
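+
+/*
+ * Sketch of a typical caller sequence (illustrative, not taken from this
+ * patch): a device queue manager would obtain a manager with
+ * mqd_manager_init(KFD_MQD_TYPE_CIK_COMPUTE, dev), then drive a queue's
+ * lifetime through mqd->init_mqd(), mqd->load_mqd() (no cp scheduling mode
+ * only), mqd->update_mqd() on property changes, and finally
+ * mqd->destroy_mqd() followed by mqd->uninit_mqd().
+ */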
+
+/* SDMA queues should be implemented here once the CP supports them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
new file mode 100644 (file)
index 0000000..213a71e
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_MQD_MANAGER_H_
+#define KFD_MQD_MANAGER_H_
+
+#include "kfd_priv.h"
+
+/**
+ * struct mqd_manager
+ *
+ * @init_mqd: Allocates the mqd buffer on local gpu memory and initializes it.
+ *
+ * @load_mqd: Loads the mqd to a concrete hqd slot. Used only in no cp
+ * scheduling mode.
+ *
+ * @update_mqd: Handles an update call for the MQD.
+ *
+ * @destroy_mqd: Destroys the HQD slot, thereby preempting the relevant queue.
+ * Used only in no cp scheduling mode.
+ *
+ * @uninit_mqd: Releases the mqd buffer from local gpu memory.
+ *
+ * @is_occupied: Checks if the relevant HQD slot is occupied.
+ *
+ * @mqd_mutex: Mqd manager mutex.
+ *
+ * @dev: The kfd device structure coupled with this module.
+ *
+ * MQD stands for Memory Queue Descriptor, which represents the current queue
+ * state in memory and initiates the HQD (Hardware Queue Descriptor) state.
+ * This structure is effectively a base class for the different MQD
+ * structures of the various ASICs that should be supported in the future,
+ * and it also contains all the MQD specific operations.
+ * Another important point is that each queue has an MQD that keeps its
+ * state (or context) across preemptions and reassignments.
+ * Basically, there is an instance of the mqd manager class per MQD type per
+ * ASIC. Currently the kfd driver supports only Kaveri, so there is one
+ * instance per KFD_MQD_TYPE for each device.
+ *
+ */
+
+struct mqd_manager {
+       int     (*init_mqd)(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *q);
+
+       int     (*load_mqd)(struct mqd_manager *mm, void *mqd,
+                               uint32_t pipe_id, uint32_t queue_id,
+                               uint32_t __user *wptr);
+
+       int     (*update_mqd)(struct mqd_manager *mm, void *mqd,
+                               struct queue_properties *q);
+
+       int     (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
+                               enum kfd_preempt_type type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
+
+       void    (*uninit_mqd)(struct mqd_manager *mm, void *mqd,
+                               struct kfd_mem_obj *mqd_mem_obj);
+
+       bool    (*is_occupied)(struct mqd_manager *mm, void *mqd,
+                               uint64_t queue_address, uint32_t pipe_id,
+                               uint32_t queue_id);
+
+       struct mutex    mqd_mutex;
+       struct kfd_dev  *dev;
+};
+
+#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
new file mode 100644 (file)
index 0000000..5ce9233
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+                               unsigned int buffer_size_bytes)
+{
+       unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
+
+       BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+       *wptr = temp;
+}
+
+static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
+{
+       union PM4_MES_TYPE_3_HEADER header;
+
+       header.u32all = 0;
+       header.opcode = opcode;
+       header.count = packet_size/sizeof(uint32_t) - 2;
+       header.type = PM4_TYPE_3;
+
+       return header.u32all;
+}
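+
+/*
+ * Worked example (illustrative): struct pm4_runlist is 16 bytes, i.e. 4
+ * dwords, so build_pm4_header(IT_RUN_LIST, sizeof(struct pm4_runlist))
+ * yields type = PM4_TYPE_3, opcode = 0xA5 and count = 4 - 2 = 2 (the
+ * number of dwords in the packet body minus one, per the PM4 convention).
+ */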
+
+static void pm_calc_rlib_size(struct packet_manager *pm,
+                               unsigned int *rlib_size,
+                               bool *over_subscription)
+{
+       unsigned int process_count, queue_count;
+
+       BUG_ON(!pm || !rlib_size || !over_subscription);
+
+       process_count = pm->dqm->processes_count;
+       queue_count = pm->dqm->queue_count;
+
+       /* check if there is over subscription */
+       *over_subscription = false;
+       if ((process_count > 1) ||
+               queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
+               *over_subscription = true;
+               pr_debug("kfd: over subscribed runlist\n");
+       }
+
+       /* calculate run list ib allocation size */
+       *rlib_size = process_count * sizeof(struct pm4_map_process) +
+                    queue_count * sizeof(struct pm4_map_queues);
+
+       /*
+        * Increase the allocation size in case we need a chained run list
+        * when over subscription
+        */
+       if (*over_subscription)
+               *rlib_size += sizeof(struct pm4_runlist);
+
+       pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+}
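+
+/*
+ * Sizing example (illustrative): with 2 processes owning 3 queues in total,
+ * the runlist IB needs 2 * sizeof(struct pm4_map_process) +
+ * 3 * sizeof(struct pm4_map_queues) bytes, plus one extra
+ * sizeof(struct pm4_runlist) for the chaining packet, since more than one
+ * process already counts as over subscription here.
+ */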
+
+static int pm_allocate_runlist_ib(struct packet_manager *pm,
+                               unsigned int **rl_buffer,
+                               uint64_t *rl_gpu_buffer,
+                               unsigned int *rl_buffer_size,
+                               bool *is_over_subscription)
+{
+       int retval;
+
+       BUG_ON(!pm);
+       BUG_ON(pm->allocated);
+       BUG_ON(!is_over_subscription);
+
+       pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+
+       retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
+                                       *rl_buffer_size,
+                                       PAGE_SIZE,
+                                       KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+                                       (struct kgd_mem **) &pm->ib_buffer_obj);
+
+       if (retval != 0) {
+               pr_err("kfd: failed to allocate runlist IB\n");
+               return retval;
+       }
+
+       *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
+       *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
+
+       memset(*rl_buffer, 0, *rl_buffer_size);
+       pm->allocated = true;
+       return retval;
+}
+
+static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
+                       uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+       struct pm4_runlist *packet;
+
+       BUG_ON(!pm || !buffer || !ib);
+
+       packet = (struct pm4_runlist *)buffer;
+
+       memset(buffer, 0, sizeof(struct pm4_runlist));
+       packet->header.u32all = build_pm4_header(IT_RUN_LIST,
+                                               sizeof(struct pm4_runlist));
+
+       packet->bitfields4.ib_size = ib_size_in_dwords;
+       packet->bitfields4.chain = chain ? 1 : 0;
+       packet->bitfields4.offload_polling = 0;
+       packet->bitfields4.valid = 1;
+       packet->ordinal2 = lower_32_bits(ib);
+       packet->bitfields3.ib_base_hi = upper_32_bits(ib);
+
+       return 0;
+}
+
+static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
+                               struct qcm_process_device *qpd)
+{
+       struct pm4_map_process *packet;
+       struct queue *cur;
+       uint32_t num_queues;
+
+       BUG_ON(!pm || !buffer || !qpd);
+
+       packet = (struct pm4_map_process *)buffer;
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       memset(buffer, 0, sizeof(struct pm4_map_process));
+
+       packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
+                                       sizeof(struct pm4_map_process));
+       packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+       packet->bitfields2.process_quantum = 1;
+       packet->bitfields2.pasid = qpd->pqm->process->pasid;
+       packet->bitfields3.page_table_base = qpd->page_table_base;
+       packet->bitfields10.gds_size = qpd->gds_size;
+       packet->bitfields10.num_gws = qpd->num_gws;
+       packet->bitfields10.num_oac = qpd->num_oac;
+       num_queues = 0;
+       list_for_each_entry(cur, &qpd->queues_list, list)
+               num_queues++;
+       packet->bitfields10.num_queues = num_queues;
+
+       packet->sh_mem_config = qpd->sh_mem_config;
+       packet->sh_mem_bases = qpd->sh_mem_bases;
+       packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+       packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+
+       packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+       packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+       return 0;
+}
+
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
+                               struct queue *q)
+{
+       struct pm4_map_queues *packet;
+
+       BUG_ON(!pm || !buffer || !q);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       packet = (struct pm4_map_queues *)buffer;
+       memset(buffer, 0, sizeof(struct pm4_map_queues));
+
+       packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
+                                               sizeof(struct pm4_map_queues));
+       packet->bitfields2.alloc_format =
+                               alloc_format__mes_map_queues__one_per_pipe;
+       packet->bitfields2.num_queues = 1;
+       packet->bitfields2.queue_sel =
+               queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
+
+       packet->bitfields2.vidmem = (q->properties.is_interop) ?
+                       vidmem__mes_map_queues__uses_video_memory :
+                       vidmem__mes_map_queues__uses_no_video_memory;
+
+       switch (q->properties.type) {
+       case KFD_QUEUE_TYPE_COMPUTE:
+       case KFD_QUEUE_TYPE_DIQ:
+               packet->bitfields2.engine_sel =
+                               engine_sel__mes_map_queues__compute;
+               break;
+       case KFD_QUEUE_TYPE_SDMA:
+               packet->bitfields2.engine_sel =
+                               engine_sel__mes_map_queues__sdma0;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
+                       q->properties.doorbell_off;
+
+       packet->mes_map_queues_ordinals[0].mqd_addr_lo =
+                       lower_32_bits(q->gart_mqd_addr);
+
+       packet->mes_map_queues_ordinals[0].mqd_addr_hi =
+                       upper_32_bits(q->gart_mqd_addr);
+
+       packet->mes_map_queues_ordinals[0].wptr_addr_lo =
+                       lower_32_bits((uint64_t)q->properties.write_ptr);
+
+       packet->mes_map_queues_ordinals[0].wptr_addr_hi =
+                       upper_32_bits((uint64_t)q->properties.write_ptr);
+
+       return 0;
+}
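+
+/*
+ * Note: although pm4_map_queues declares mes_map_queues_ordinals[] as a
+ * 1..N ordinal group, this helper always emits a single ordinal group
+ * (num_queues = 1), i.e. one map packet per mapped queue.
+ */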
+
+static int pm_create_runlist_ib(struct packet_manager *pm,
+                               struct list_head *queues,
+                               uint64_t *rl_gpu_addr,
+                               size_t *rl_size_bytes)
+{
+       unsigned int alloc_size_bytes;
+       unsigned int *rl_buffer, rl_wptr, i;
+       int retval, processes_mapped;
+       struct device_process_node *cur;
+       struct qcm_process_device *qpd;
+       struct queue *q;
+       struct kernel_queue *kq;
+       bool is_over_subscription;
+
+       BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
+
+       rl_wptr = retval = processes_mapped = 0;
+
+       retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
+                               &alloc_size_bytes, &is_over_subscription);
+       if (retval != 0)
+               return retval;
+
+       *rl_size_bytes = alloc_size_bytes;
+
+       pr_debug("kfd: In func %s\n", __func__);
+       pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+               pm->dqm->processes_count, pm->dqm->queue_count);
+
+       /* build the run list ib packet */
+       list_for_each_entry(cur, queues, list) {
+               qpd = cur->qpd;
+               /* build map process packet */
+               if (processes_mapped >= pm->dqm->processes_count) {
+                       pr_debug("kfd: not enough space left in runlist IB\n");
+                       pm_release_ib(pm);
+                       return -ENOMEM;
+               }
+               retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
+               if (retval != 0)
+                       return retval;
+               processes_mapped++;
+               inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+                               alloc_size_bytes);
+
+               list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+                       if (!kq->queue->properties.is_active)
+                               continue;
+                       retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
+                                                       kq->queue);
+                       if (retval != 0)
+                               return retval;
+                       inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+                                       alloc_size_bytes);
+               }
+
+               list_for_each_entry(q, &qpd->queues_list, list) {
+                       if (!q->properties.is_active)
+                               continue;
+                       retval = pm_create_map_queue(pm,
+                                               &rl_buffer[rl_wptr], q);
+                       if (retval != 0)
+                               return retval;
+                       inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+                                       alloc_size_bytes);
+               }
+       }
+
+       pr_debug("kfd: finished map process and queues to runlist\n");
+
+       if (is_over_subscription)
+               pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
+                               alloc_size_bytes / sizeof(uint32_t), true);
+
+       for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
+               pr_debug("0x%2X ", rl_buffer[i]);
+       pr_debug("\n");
+
+       return 0;
+}
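+
+/*
+ * Resulting IB layout (sketch): for each process, one pm4_map_process
+ * packet followed by a pm4_map_queues packet per active kernel and user
+ * queue; when over subscribed, a final pm4_runlist packet with chain = 1
+ * points back at the IB base so the cp cycles over the whole list.
+ */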
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm);
+
+       pm->dqm = dqm;
+       mutex_init(&pm->lock);
+       pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+       if (pm->priv_queue == NULL) {
+               mutex_destroy(&pm->lock);
+               return -ENOMEM;
+       }
+       pm->allocated = false;
+
+       return 0;
+}
+
+void pm_uninit(struct packet_manager *pm)
+{
+       BUG_ON(!pm);
+
+       mutex_destroy(&pm->lock);
+       kernel_queue_uninit(pm->priv_queue);
+}
+
+int pm_send_set_resources(struct packet_manager *pm,
+                               struct scheduling_resources *res)
+{
+       struct pm4_set_resources *packet;
+
+       BUG_ON(!pm || !res);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mutex_lock(&pm->lock);
+       pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+                                       sizeof(*packet) / sizeof(uint32_t),
+                       (unsigned int **)&packet);
+       if (packet == NULL) {
+               mutex_unlock(&pm->lock);
+               pr_err("kfd: failed to allocate buffer on kernel queue\n");
+               return -ENOMEM;
+       }
+
+       memset(packet, 0, sizeof(struct pm4_set_resources));
+       packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
+                                       sizeof(struct pm4_set_resources));
+
+       packet->bitfields2.queue_type =
+                       queue_type__mes_set_resources__hsa_interface_queue_hiq;
+       packet->bitfields2.vmid_mask = res->vmid_mask;
+       packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+       packet->bitfields7.oac_mask = res->oac_mask;
+       packet->bitfields8.gds_heap_base = res->gds_heap_base;
+       packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+       packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+       packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+       packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+       packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+       pm->priv_queue->submit_packet(pm->priv_queue);
+       pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+       mutex_unlock(&pm->lock);
+
+       return 0;
+}
+
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+{
+       uint64_t rl_gpu_ib_addr;
+       uint32_t *rl_buffer;
+       size_t rl_ib_size, packet_size_dwords;
+       int retval;
+
+       BUG_ON(!pm || !dqm_queues);
+
+       retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
+                                       &rl_ib_size);
+       if (retval != 0)
+               goto fail_create_runlist_ib;
+
+       pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+       packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+       mutex_lock(&pm->lock);
+
+       retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+                                       packet_size_dwords, &rl_buffer);
+       if (retval != 0)
+               goto fail_acquire_packet_buffer;
+
+       retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
+                                       rl_ib_size / sizeof(uint32_t), false);
+       if (retval != 0)
+               goto fail_create_runlist;
+
+       pm->priv_queue->submit_packet(pm->priv_queue);
+       pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+       mutex_unlock(&pm->lock);
+
+       return retval;
+
+fail_create_runlist:
+       pm->priv_queue->rollback_packet(pm->priv_queue);
+fail_acquire_packet_buffer:
+       mutex_unlock(&pm->lock);
+fail_create_runlist_ib:
+       if (pm->allocated)
+               pm_release_ib(pm);
+       return retval;
+}
+
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+                       uint32_t fence_value)
+{
+       int retval;
+       struct pm4_query_status *packet;
+
+       BUG_ON(!pm || !fence_address);
+
+       mutex_lock(&pm->lock);
+       retval = pm->priv_queue->acquire_packet_buffer(
+                       pm->priv_queue,
+                       sizeof(struct pm4_query_status) / sizeof(uint32_t),
+                       (unsigned int **)&packet);
+       if (retval != 0)
+               goto fail_acquire_packet_buffer;
+
+       packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
+                                       sizeof(struct pm4_query_status));
+
+       packet->bitfields2.context_id = 0;
+       packet->bitfields2.interrupt_sel =
+                       interrupt_sel__mes_query_status__completion_status;
+       packet->bitfields2.command =
+                       command__mes_query_status__fence_only_after_write_ack;
+
+       packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+       packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+       packet->data_hi = upper_32_bits((uint64_t)fence_value);
+       packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+       pm->priv_queue->submit_packet(pm->priv_queue);
+       pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+       mutex_unlock(&pm->lock);
+
+       return 0;
+
+fail_acquire_packet_buffer:
+       mutex_unlock(&pm->lock);
+       return retval;
+}
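+
+/*
+ * Usage note (illustrative): with
+ * command__mes_query_status__fence_only_after_write_ack, the cp writes
+ * fence_value to fence_address once prior writes are acked, so a caller
+ * can poll that location to detect completion.
+ */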
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+                       enum kfd_preempt_type_filter mode,
+                       uint32_t filter_param, bool reset,
+                       unsigned int sdma_engine)
+{
+       int retval;
+       uint32_t *buffer;
+       struct pm4_unmap_queues *packet;
+
+       BUG_ON(!pm);
+
+       mutex_lock(&pm->lock);
+       retval = pm->priv_queue->acquire_packet_buffer(
+                       pm->priv_queue,
+                       sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+                       &buffer);
+       if (retval != 0)
+               goto err_acquire_packet_buffer;
+
+       packet = (struct pm4_unmap_queues *)buffer;
+       memset(buffer, 0, sizeof(struct pm4_unmap_queues));
+
+       packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
+                                       sizeof(struct pm4_unmap_queues));
+       switch (type) {
+       case KFD_QUEUE_TYPE_COMPUTE:
+       case KFD_QUEUE_TYPE_DIQ:
+               packet->bitfields2.engine_sel =
+                       engine_sel__mes_unmap_queues__compute;
+               break;
+       case KFD_QUEUE_TYPE_SDMA:
+               packet->bitfields2.engine_sel =
+                       engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       if (reset)
+               packet->bitfields2.action =
+                               action__mes_unmap_queues__reset_queues;
+       else
+               packet->bitfields2.action =
+                               action__mes_unmap_queues__preempt_queues;
+
+       switch (mode) {
+       case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+               packet->bitfields2.queue_sel =
+                               queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+               packet->bitfields2.num_queues = 1;
+               packet->bitfields3b.doorbell_offset0 = filter_param;
+               break;
+       case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+               packet->bitfields2.queue_sel =
+                               queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+               packet->bitfields3a.pasid = filter_param;
+               break;
+       case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+               packet->bitfields2.queue_sel =
+                               queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       pm->priv_queue->submit_packet(pm->priv_queue);
+       pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+       mutex_unlock(&pm->lock);
+       return 0;
+
+err_acquire_packet_buffer:
+       mutex_unlock(&pm->lock);
+       return retval;
+}
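+
+/*
+ * For instance (illustrative), preempting every queue of one process would
+ * use mode = KFD_PREEMPT_TYPE_FILTER_BY_PASID with filter_param set to that
+ * process' pasid, while filter_param is a doorbell offset for the
+ * single-queue filter and is ignored for the all-queues filter.
+ */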
+
+void pm_release_ib(struct packet_manager *pm)
+{
+       BUG_ON(!pm);
+
+       mutex_lock(&pm->lock);
+       if (pm->allocated) {
+               kfd2kgd->free_mem(pm->dqm->dev->kgd,
+                               (struct kgd_mem *) pm->ib_buffer_obj);
+               pm->allocated = false;
+       }
+       mutex_unlock(&pm->lock);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
new file mode 100644 (file)
index 0000000..71699ad
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+static unsigned long *pasid_bitmap;
+static unsigned int pasid_limit;
+static DEFINE_MUTEX(pasid_mutex);
+
+int kfd_pasid_init(void)
+{
+       pasid_limit = max_num_of_processes;
+
+       /* BITS_TO_LONGS() counts longs, so scale by sizeof(long) for bytes */
+       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+                               GFP_KERNEL);
+       if (!pasid_bitmap)
+               return -ENOMEM;
+
+       set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
+
+       return 0;
+}
+
+void kfd_pasid_exit(void)
+{
+       kfree(pasid_bitmap);
+}
+
+bool kfd_set_pasid_limit(unsigned int new_limit)
+{
+       if (new_limit < pasid_limit) {
+               bool ok;
+
+               mutex_lock(&pasid_mutex);
+
+               /* ensure that no pasids >= new_limit are in-use */
+               ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
+                                                               pasid_limit);
+               if (ok)
+                       pasid_limit = new_limit;
+
+               mutex_unlock(&pasid_mutex);
+
+               return ok;
+       }
+
+       return true;
+}
+
+inline unsigned int kfd_get_pasid_limit(void)
+{
+       return pasid_limit;
+}
+
+unsigned int kfd_pasid_alloc(void)
+{
+       unsigned int found;
+
+       mutex_lock(&pasid_mutex);
+
+       found = find_first_zero_bit(pasid_bitmap, pasid_limit);
+       if (found == pasid_limit)
+               found = 0;
+       else
+               set_bit(found, pasid_bitmap);
+
+       mutex_unlock(&pasid_mutex);
+
+       return found;
+}
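+
+/*
+ * Note: a return value of 0 means allocation failed; PASID 0 is reserved at
+ * init time, so it can never be handed out and doubles as the error value.
+ */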
+
+void kfd_pasid_free(unsigned int pasid)
+{
+       BUG_ON(pasid == 0 || pasid >= pasid_limit);
+       clear_bit(pasid, pasid_bitmap);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
new file mode 100644 (file)
index 0000000..071ad57
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_PM4_HEADERS_H_
+#define KFD_PM4_HEADERS_H_
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+       struct {
+               uint32_t reserved1:8;   /* < reserved */
+               uint32_t opcode:8;      /* < IT opcode */
+               uint32_t count:14;      /* < number of DWORDs - 1
+                                        * in the information body.
+                                        */
+               uint32_t type:2;        /* < packet identifier.
+                                        * It should be 3 for type 3 packets
+                                        */
+       };
+       uint32_t u32all;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/* --------------------MES_SET_RESOURCES-------------------- */
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum set_resources_queue_type_enum {
+       queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+       queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+       queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+struct pm4_set_resources {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t vmid_mask:16;
+                       uint32_t unmap_latency:8;
+                       uint32_t reserved1:5;
+                       enum set_resources_queue_type_enum queue_type:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       uint32_t queue_mask_lo;
+       uint32_t queue_mask_hi;
+       uint32_t gws_mask_lo;
+       uint32_t gws_mask_hi;
+
+       union {
+               struct {
+                       uint32_t oac_mask:16;
+                       uint32_t reserved2:16;
+               } bitfields7;
+               uint32_t ordinal7;
+       };
+
+       union {
+               struct {
+                       uint32_t gds_heap_base:6;
+                       uint32_t reserved3:5;
+                       uint32_t gds_heap_size:6;
+                       uint32_t reserved4:15;
+               } bitfields8;
+               uint32_t ordinal8;
+       };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST-------------------- */
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_runlist {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved1:2;
+                       uint32_t ib_base_lo:30;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t ib_base_hi:16;
+                       uint32_t reserved2:16;
+               } bitfields3;
+               uint32_t ordinal3;
+       };
+
+       union {
+               struct {
+                       uint32_t ib_size:20;
+                       uint32_t chain:1;
+                       uint32_t offload_polling:1;
+                       uint32_t reserved3:1;
+                       uint32_t valid:1;
+                       uint32_t reserved4:8;
+               } bitfields4;
+               uint32_t ordinal4;
+       };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS-------------------- */
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_map_process {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved1:8;
+                       uint32_t diq_enable:1;
+                       uint32_t process_quantum:7;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t page_table_base:28;
+                       uint32_t reserved3:4;
+               } bitfields3;
+               uint32_t ordinal3;
+       };
+
+       uint32_t sh_mem_bases;
+       uint32_t sh_mem_ape1_base;
+       uint32_t sh_mem_ape1_limit;
+       uint32_t sh_mem_config;
+       uint32_t gds_addr_lo;
+       uint32_t gds_addr_hi;
+
+       union {
+               struct {
+                       uint32_t num_gws:6;
+                       uint32_t reserved4:2;
+                       uint32_t num_oac:4;
+                       uint32_t reserved5:4;
+                       uint32_t gds_size:6;
+                       uint32_t num_queues:10;
+               } bitfields10;
+               uint32_t ordinal10;
+       };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_DEFINED
+#define PM4_MES_MAP_QUEUES_DEFINED
+enum map_queues_queue_sel_enum {
+       queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
+       queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
+       queue_sel__mes_map_queues__enable_process_queues = 2
+};
+
+enum map_queues_vidmem_enum {
+       vidmem__mes_map_queues__uses_no_video_memory = 0,
+       vidmem__mes_map_queues__uses_video_memory = 1
+};
+
+enum map_queues_alloc_format_enum {
+       alloc_format__mes_map_queues__one_per_pipe = 0,
+       alloc_format__mes_map_queues__all_on_one_pipe = 1
+};
+
+enum map_queues_engine_sel_enum {
+       engine_sel__mes_map_queues__compute = 0,
+       engine_sel__mes_map_queues__sdma0 = 2,
+       engine_sel__mes_map_queues__sdma1 = 3
+};
+
+struct pm4_map_queues {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved1:4;
+                       enum map_queues_queue_sel_enum queue_sel:2;
+                       uint32_t reserved2:2;
+                       uint32_t vmid:4;
+                       uint32_t reserved3:4;
+                       enum map_queues_vidmem_enum vidmem:2;
+                       uint32_t reserved4:6;
+                       enum map_queues_alloc_format_enum alloc_format:2;
+                       enum map_queues_engine_sel_enum engine_sel:3;
+                       uint32_t num_queues:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       struct {
+               union {
+                       struct {
+                               uint32_t reserved5:2;
+                               uint32_t doorbell_offset:21;
+                               uint32_t reserved6:3;
+                               uint32_t queue:6;
+                       } bitfields3;
+                       uint32_t ordinal3;
+               };
+
+               uint32_t mqd_addr_lo;
+               uint32_t mqd_addr_hi;
+               uint32_t wptr_addr_lo;
+               uint32_t wptr_addr_hi;
+
+       } mes_map_queues_ordinals[1];   /* 1..N of these ordinal groups */
+
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum query_status_interrupt_sel_enum {
+       interrupt_sel__mes_query_status__completion_status = 0,
+       interrupt_sel__mes_query_status__process_status = 1,
+       interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum query_status_command_enum {
+       command__mes_query_status__interrupt_only = 0,
+       command__mes_query_status__fence_only_immediate = 1,
+       command__mes_query_status__fence_only_after_write_ack = 2,
+       command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum query_status_engine_sel_enum {
+       engine_sel__mes_query_status__compute = 0,
+       engine_sel__mes_query_status__sdma0_queue = 2,
+       engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_query_status {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t context_id:28;
+                       enum query_status_interrupt_sel_enum interrupt_sel:2;
+                       enum query_status_command_enum command:2;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved1:16;
+               } bitfields3a;
+               struct {
+                       uint32_t reserved2:2;
+                       uint32_t doorbell_offset:21;
+                       uint32_t reserved3:3;
+                       enum query_status_engine_sel_enum engine_sel:3;
+                       uint32_t reserved4:3;
+               } bitfields3b;
+               uint32_t ordinal3;
+       };
+
+       uint32_t addr_lo;
+       uint32_t addr_hi;
+       uint32_t data_lo;
+       uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum unmap_queues_action_enum {
+       action__mes_unmap_queues__preempt_queues = 0,
+       action__mes_unmap_queues__reset_queues = 1,
+       action__mes_unmap_queues__disable_process_queues = 2
+};
+
+enum unmap_queues_queue_sel_enum {
+       queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+       queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+       queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
+};
+
+enum unmap_queues_engine_sel_enum {
+       engine_sel__mes_unmap_queues__compute = 0,
+       engine_sel__mes_unmap_queues__sdma0 = 2,
+       engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_unmap_queues {
+       union {
+               union PM4_MES_TYPE_3_HEADER header;     /* header */
+               uint32_t ordinal1;
+       };
+
+       union {
+               struct {
+                       enum unmap_queues_action_enum action:2;
+                       uint32_t reserved1:2;
+                       enum unmap_queues_queue_sel_enum queue_sel:2;
+                       uint32_t reserved2:20;
+                       enum unmap_queues_engine_sel_enum engine_sel:3;
+                       uint32_t num_queues:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved3:16;
+               } bitfields3a;
+               struct {
+                       uint32_t reserved4:2;
+                       uint32_t doorbell_offset0:21;
+                       uint32_t reserved5:9;
+               } bitfields3b;
+               uint32_t ordinal3;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved6:2;
+                       uint32_t doorbell_offset1:21;
+                       uint32_t reserved7:9;
+               } bitfields4;
+               uint32_t ordinal4;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved8:2;
+                       uint32_t doorbell_offset2:21;
+                       uint32_t reserved9:9;
+               } bitfields5;
+               uint32_t ordinal5;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved10:2;
+                       uint32_t doorbell_offset3:21;
+                       uint32_t reserved11:9;
+               } bitfields6;
+               uint32_t ordinal6;
+       };
+
+};
+#endif
+
+enum {
+       CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
+#endif /* KFD_PM4_HEADERS_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
new file mode 100644 (file)
index 0000000..b72fa3b
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef KFD_PM4_OPCODES_H
+#define KFD_PM4_OPCODES_H
+
+enum it_opcode_type {
+       IT_NOP                               = 0x10,
+       IT_SET_BASE                          = 0x11,
+       IT_CLEAR_STATE                       = 0x12,
+       IT_INDEX_BUFFER_SIZE                 = 0x13,
+       IT_DISPATCH_DIRECT                   = 0x15,
+       IT_DISPATCH_INDIRECT                 = 0x16,
+       IT_ATOMIC_GDS                        = 0x1D,
+       IT_OCCLUSION_QUERY                   = 0x1F,
+       IT_SET_PREDICATION                   = 0x20,
+       IT_REG_RMW                           = 0x21,
+       IT_COND_EXEC                         = 0x22,
+       IT_PRED_EXEC                         = 0x23,
+       IT_DRAW_INDIRECT                     = 0x24,
+       IT_DRAW_INDEX_INDIRECT               = 0x25,
+       IT_INDEX_BASE                        = 0x26,
+       IT_DRAW_INDEX_2                      = 0x27,
+       IT_CONTEXT_CONTROL                   = 0x28,
+       IT_INDEX_TYPE                        = 0x2A,
+       IT_DRAW_INDIRECT_MULTI               = 0x2C,
+       IT_DRAW_INDEX_AUTO                   = 0x2D,
+       IT_NUM_INSTANCES                     = 0x2F,
+       IT_DRAW_INDEX_MULTI_AUTO             = 0x30,
+       IT_INDIRECT_BUFFER_CNST              = 0x33,
+       IT_STRMOUT_BUFFER_UPDATE             = 0x34,
+       IT_DRAW_INDEX_OFFSET_2               = 0x35,
+       IT_DRAW_PREAMBLE                     = 0x36,
+       IT_WRITE_DATA                        = 0x37,
+       IT_DRAW_INDEX_INDIRECT_MULTI         = 0x38,
+       IT_MEM_SEMAPHORE                     = 0x39,
+       IT_COPY_DW                           = 0x3B,
+       IT_WAIT_REG_MEM                      = 0x3C,
+       IT_INDIRECT_BUFFER                   = 0x3F,
+       IT_COPY_DATA                         = 0x40,
+       IT_PFP_SYNC_ME                       = 0x42,
+       IT_SURFACE_SYNC                      = 0x43,
+       IT_COND_WRITE                        = 0x45,
+       IT_EVENT_WRITE                       = 0x46,
+       IT_EVENT_WRITE_EOP                   = 0x47,
+       IT_EVENT_WRITE_EOS                   = 0x48,
+       IT_RELEASE_MEM                       = 0x49,
+       IT_PREAMBLE_CNTL                     = 0x4A,
+       IT_DMA_DATA                          = 0x50,
+       IT_ACQUIRE_MEM                       = 0x58,
+       IT_REWIND                            = 0x59,
+       IT_LOAD_UCONFIG_REG                  = 0x5E,
+       IT_LOAD_SH_REG                       = 0x5F,
+       IT_LOAD_CONFIG_REG                   = 0x60,
+       IT_LOAD_CONTEXT_REG                  = 0x61,
+       IT_SET_CONFIG_REG                    = 0x68,
+       IT_SET_CONTEXT_REG                   = 0x69,
+       IT_SET_CONTEXT_REG_INDIRECT          = 0x73,
+       IT_SET_SH_REG                        = 0x76,
+       IT_SET_SH_REG_OFFSET                 = 0x77,
+       IT_SET_QUEUE_REG                     = 0x78,
+       IT_SET_UCONFIG_REG                   = 0x79,
+       IT_SCRATCH_RAM_WRITE                 = 0x7D,
+       IT_SCRATCH_RAM_READ                  = 0x7E,
+       IT_LOAD_CONST_RAM                    = 0x80,
+       IT_WRITE_CONST_RAM                   = 0x81,
+       IT_DUMP_CONST_RAM                    = 0x83,
+       IT_INCREMENT_CE_COUNTER              = 0x84,
+       IT_INCREMENT_DE_COUNTER              = 0x85,
+       IT_WAIT_ON_CE_COUNTER                = 0x86,
+       IT_WAIT_ON_DE_COUNTER_DIFF           = 0x88,
+       IT_SWITCH_BUFFER                     = 0x8B,
+       IT_SET_RESOURCES                     = 0xA0,
+       IT_MAP_PROCESS                       = 0xA1,
+       IT_MAP_QUEUES                        = 0xA2,
+       IT_UNMAP_QUEUES                      = 0xA3,
+       IT_QUERY_STATUS                      = 0xA4,
+       IT_RUN_LIST                          = 0xA5,
+};
+
+#define PM4_TYPE_0 0
+#define PM4_TYPE_2 2
+#define PM4_TYPE_3 3
+
+#endif /* KFD_PM4_OPCODES_H */
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
new file mode 100644 (file)
index 0000000..f9fb81e
--- /dev/null
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_PRIV_H_INCLUDED
+#define KFD_PRIV_H_INCLUDED
+
+#include <linux/hashtable.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/kfd_ioctl.h>
+#include <kgd_kfd_interface.h>
+
+#define KFD_SYSFS_FILE_MODE 0444
+
+/*
+ * When working with the cp scheduler we should assign the HIQ, manually or
+ * via the radeon driver, to a fixed hqd slot; here are the fixed HIQ hqd
+ * slot definitions for Kaveri. On Kaveri only the queues of the first ME
+ * participate in cp scheduling, so with that in mind we place the HIQ slot
+ * in the second ME.
+ */
+#define KFD_CIK_HIQ_PIPE 4
+#define KFD_CIK_HIQ_QUEUE 0
+
+/* GPU ID hash width in bits */
+#define KFD_GPU_ID_HASH_WIDTH 16
+
+/* Macro for allocating structures */
+#define kfd_alloc_struct(ptr_to_struct)        \
+       ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
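+
+/*
+ * Illustrative use: with "struct kfd_mem_obj *obj;", the call
+ * "obj = kfd_alloc_struct(obj);" zero-allocates sizeof(*obj) bytes and
+ * returns the pointer already cast to the correct type.
+ */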
+
+/* Kernel module parameter to specify maximum number of supported processes */
+extern int max_num_of_processes;
+
+#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
+#define KFD_MAX_NUM_OF_PROCESSES 512
+
+/*
+ * Kernel module parameter to specify maximum number of supported queues
+ * per process
+ */
+extern int max_num_of_queues_per_process;
+
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/* Kernel module parameter to specify the scheduling policy */
+extern int sched_policy;
+
+/**
+ * enum kfd_sched_policy
+ *
+ * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
+ * scheduling. In this scheduling mode we're using the firmware code to
+ * schedule the user mode queues and kernel queues such as HIQ and DIQ.
+ * The HIQ queue is used as a special queue that dispatches the configuration
+ * to the cp and the list of user mode queues that are currently running.
+ * The DIQ queue is a debugging queue that dispatches debugging commands to
+ * the firmware.
+ * In this scheduling mode the user mode queues over-subscription feature is
+ * enabled.
+ *
+ * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
+ * over-subscription feature disabled.
+ *
+ * @KFD_SCHED_POLICY_NO_HWS: "No H/W scheduling" policy, a mode which directly
+ * sets the command processor registers and sets up the queues "manually".
+ * This mode is used *ONLY* for debugging purposes.
+ *
+ */
+enum kfd_sched_policy {
+       KFD_SCHED_POLICY_HWS = 0,
+       KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
+       KFD_SCHED_POLICY_NO_HWS
+};
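+
+/*
+ * Illustrative: since sched_policy is a module parameter, loading the
+ * module with sched_policy=2 (assuming the parameter keeps this name in its
+ * module_param() declaration) would select KFD_SCHED_POLICY_NO_HWS, the
+ * debug-only mode that programs the cp registers directly.
+ */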
+
+enum cache_policy {
+       cache_policy_coherent,
+       cache_policy_noncoherent
+};
+
+struct kfd_device_info {
+       unsigned int max_pasid_bits;
+       size_t ih_ring_entry_size;
+       uint16_t mqd_size_aligned;
+};
+
+struct kfd_dev {
+       struct kgd_dev *kgd;
+
+       const struct kfd_device_info *device_info;
+       struct pci_dev *pdev;
+
+       unsigned int id;                /* topology stub index */
+
+       phys_addr_t doorbell_base;      /* Start of actual doorbells used by
+                                        * KFD. It is aligned for mapping
+                                        * into user mode
+                                        */
+       size_t doorbell_id_offset;      /* Doorbell offset (from KFD doorbell
+                                        * to HW doorbell, GFX reserved some
+                                        * at the start)
+                                        */
+       size_t doorbell_process_limit;  /* Number of processes we have doorbell
+                                        * space for.
+                                        */
+       u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
+                                          * page used by kernel queue
+                                          */
+
+       struct kgd2kfd_shared_resources shared_resources;
+
+       void *interrupt_ring;
+       size_t interrupt_ring_size;
+       atomic_t interrupt_ring_rptr;
+       atomic_t interrupt_ring_wptr;
+       struct work_struct interrupt_work;
+       spinlock_t interrupt_lock;
+
+       /* QCM Device instance */
+       struct device_queue_manager *dqm;
+
+       bool init_complete;
+       /*
+        * Interrupts of interest to KFD are copied
+        * from the HW ring into a SW ring.
+        */
+       bool interrupts_active;
+};
+
+/* KGD2KFD callbacks */
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+                        const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+
+extern const struct kfd2kgd_calls *kfd2kgd;
+
+struct kfd_mem_obj {
+       void *bo;
+       uint64_t gpu_addr;
+       uint32_t *cpu_ptr;
+};
+
+enum kfd_mempool {
+       KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
+       KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
+       KFD_MEMPOOL_FRAMEBUFFER = 3,
+};
+
+/* Character device interface */
+int kfd_chardev_init(void);
+void kfd_chardev_exit(void);
+struct device *kfd_chardev(void);
+
+/**
+ * enum kfd_preempt_type_filter
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ *                                             running queues list.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
+ *                                             specific process.
+ *
+ */
+enum kfd_preempt_type_filter {
+       KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
+       KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
+       KFD_PREEMPT_TYPE_FILTER_BY_PASID
+};
+
+enum kfd_preempt_type {
+       KFD_PREEMPT_TYPE_WAVEFRONT,
+       KFD_PREEMPT_TYPE_WAVEFRONT_RESET
+};
+
+/**
+ * enum kfd_queue_type
+ *
+ * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
+ *
+ * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
+ */
+enum kfd_queue_type  {
+       KFD_QUEUE_TYPE_COMPUTE,
+       KFD_QUEUE_TYPE_SDMA,
+       KFD_QUEUE_TYPE_HIQ,
+       KFD_QUEUE_TYPE_DIQ
+};
+
+enum kfd_queue_format {
+       KFD_QUEUE_FORMAT_PM4,
+       KFD_QUEUE_FORMAT_AQL
+};
+
+/**
+ * struct queue_properties
+ *
+ * @type: The queue type.
+ *
+ * @queue_id: Queue identifier.
+ *
+ * @queue_address: Queue ring buffer address.
+ *
+ * @queue_size: Queue ring buffer size.
+ *
+ * @priority: Defines the queue priority relative to other queues in the
+ * process.
+ * This is just an indication; HW scheduling may override the priority as
+ * necessary while keeping the relative prioritization.
+ * The priority granularity is from 0 to f, where f is the highest priority.
+ * Currently all queues are initialized with the highest priority.
+ *
+ * @queue_percent: This field is partially implemented; currently a zero in
+ * this field defines that the queue is not active.
+ *
+ * @read_ptr: User space address which points to the number of dwords the
+ * cp has read from the ring buffer. This field is updated by the H/W.
+ *
+ * @write_ptr: Defines the number of dwords written to the ring buffer.
+ *
+ * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
+ * buffer. It should hold the same value as write_ptr, and user space should
+ * update it after updating write_ptr.
+ *
+ * @doorbell_off: The doorbell offset in the doorbell pci-bar.
+ *
+ * @is_interop: Defines if this is an interop queue. An interop queue can
+ * access both graphics and compute resources.
+ *
+ * @is_active: Defines if the queue is active or not.
+ *
+ * @vmid: If the scheduling mode is "no cp scheduling", this field defines the
+ * vmid of the queue.
+ *
+ * This structure represents the queue properties for each queue, no matter
+ * whether it's a user mode or a kernel mode queue.
+ *
+ */
+struct queue_properties {
+       enum kfd_queue_type type;
+       enum kfd_queue_format format;
+       unsigned int queue_id;
+       uint64_t queue_address;
+       uint64_t  queue_size;
+       uint32_t priority;
+       uint32_t queue_percent;
+       uint32_t *read_ptr;
+       uint32_t *write_ptr;
+       uint32_t __iomem *doorbell_ptr;
+       uint32_t doorbell_off;
+       bool is_interop;
+       bool is_active;
+       /* Not relevant for user mode queues in cp scheduling */
+       unsigned int vmid;
+};
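As an illustration only, a compute queue's properties might be filled like this before being handed to the queue managers declared below; ring_gpu_va is a hypothetical user-supplied ring buffer address:

	struct queue_properties props = {
		.type		= KFD_QUEUE_TYPE_COMPUTE,
		.format		= KFD_QUEUE_FORMAT_PM4,
		.queue_address	= ring_gpu_va,	/* hypothetical */
		.queue_size	= 4096,
		.priority	= 0xf,		/* highest */
		.queue_percent	= 100,		/* non-zero: active */
	};
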
+
+/**
+ * struct queue
+ *
+ * @list: Queue linked list.
+ *
+ * @mqd: The queue MQD.
+ *
+ * @mqd_mem_obj: The MQD local gpu memory object.
+ *
+ * @gart_mqd_addr: The MQD gart mc address.
+ *
+ * @properties: The queue properties.
+ *
+ * @mec: Used only in no cp scheduling mode and identifies the micro engine id
+ * that the queue should be executed on.
+ *
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ *
+ * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
+ *
+ * @process: The kfd process that created this queue.
+ *
+ * @device: The kfd device that created this queue.
+ *
+ * This structure represents user mode compute queues.
+ * It contains all the necessary data to handle such queues.
+ *
+ */
+
+struct queue {
+       struct list_head list;
+       void *mqd;
+       struct kfd_mem_obj *mqd_mem_obj;
+       uint64_t gart_mqd_addr;
+       struct queue_properties properties;
+
+       uint32_t mec;
+       uint32_t pipe;
+       uint32_t queue;
+
+       struct kfd_process      *process;
+       struct kfd_dev          *device;
+};
+
+/*
+ * Please read the kfd_mqd_manager.h description.
+ */
+enum KFD_MQD_TYPE {
+       KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
+       KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
+       KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
+       KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
+       KFD_MQD_TYPE_MAX
+};
+
+struct scheduling_resources {
+       unsigned int vmid_mask;
+       enum kfd_queue_type type;
+       uint64_t queue_mask;
+       uint64_t gws_mask;
+       uint32_t oac_mask;
+       uint32_t gds_heap_base;
+       uint32_t gds_heap_size;
+};
+
+struct process_queue_manager {
+       /* data */
+       struct kfd_process      *process;
+       unsigned int            num_concurrent_processes;
+       struct list_head        queues;
+       unsigned long           *queue_slot_bitmap;
+};
+
+struct qcm_process_device {
+       /* The Device Queue Manager that owns this data */
+       struct device_queue_manager *dqm;
+       struct process_queue_manager *pqm;
+       /* Device Queue Manager lock */
+       struct mutex *lock;
+       /* Queues list */
+       struct list_head queues_list;
+       struct list_head priv_queue_list;
+
+       unsigned int queue_count;
+       unsigned int vmid;
+       bool is_debug;
+       /*
+        * All the memory management data should be here too
+        */
+       uint64_t gds_context_area;
+       uint32_t sh_mem_config;
+       uint32_t sh_mem_bases;
+       uint32_t sh_mem_ape1_base;
+       uint32_t sh_mem_ape1_limit;
+       uint32_t page_table_base;
+       uint32_t gds_size;
+       uint32_t num_gws;
+       uint32_t num_oac;
+};
+
+/* Data that is per-process, per-device. */
+struct kfd_process_device {
+       /*
+        * List of all per-device data for a process.
+        * Starts from kfd_process.per_device_data.
+        */
+       struct list_head per_device_list;
+
+       /* The device that owns this data. */
+       struct kfd_dev *dev;
+
+
+       /* per-process, per-device QCM data structure */
+       struct qcm_process_device qpd;
+
+       /* Apertures */
+       uint64_t lds_base;
+       uint64_t lds_limit;
+       uint64_t gpuvm_base;
+       uint64_t gpuvm_limit;
+       uint64_t scratch_base;
+       uint64_t scratch_limit;
+
+       /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
+       bool bound;
+};
+
+#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
+
+/* Process data */
+struct kfd_process {
+       /*
+        * kfd_process are stored in an mm_struct*->kfd_process*
+        * hash table (kfd_processes in kfd_process.c)
+        */
+       struct hlist_node kfd_processes;
+
+       struct mm_struct *mm;
+
+       struct mutex mutex;
+
+       /*
+        * In any process, the thread that started main() is the lead
+        * thread and outlives the rest.
+        * It is here because amd_iommu_bind_pasid wants a task_struct.
+        */
+       struct task_struct *lead_thread;
+
+       /* We want to receive a notification when the mm_struct is destroyed */
+       struct mmu_notifier mmu_notifier;
+
+       /* Use for delayed freeing of kfd_process structure */
+       struct rcu_head rcu;
+
+       unsigned int pasid;
+
+       /*
+        * List of kfd_process_device structures,
+        * one for each device the process is using.
+        */
+       struct list_head per_device_data;
+
+       struct process_queue_manager pqm;
+
+       /* The process's queues. */
+       size_t queue_array_size;
+
+       /* Size is queue_array_size, up to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS. */
+       struct kfd_queue **queues;
+
+       unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
+       /* Is the user space process 32 bit? */
+       bool is_32bit_user_mode;
+};
+
+void kfd_process_create_wq(void);
+void kfd_process_destroy_wq(void);
+struct kfd_process *kfd_create_process(const struct task_struct *);
+struct kfd_process *kfd_get_process(const struct task_struct *);
+
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+                                                       struct kfd_process *p);
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+                                                       struct kfd_process *p,
+                                                       int create_pdd);
+
+/* Process device data iterator */
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+                                               struct kfd_process_device *pdd);
+bool kfd_has_process_device_data(struct kfd_process *p);
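Taken together, the iterator above is meant to be used like this sketch (assuming the process mutex is held):

	struct kfd_process_device *pdd;

	for (pdd = kfd_get_first_process_device_data(p); pdd;
	     pdd = kfd_get_next_process_device_data(p, pdd))
		pr_debug("pasid %d bound to device %p\n", p->pasid, pdd->dev);
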
+
+/* PASIDs */
+int kfd_pasid_init(void);
+void kfd_pasid_exit(void);
+bool kfd_set_pasid_limit(unsigned int new_limit);
+unsigned int kfd_get_pasid_limit(void);
+unsigned int kfd_pasid_alloc(void);
+void kfd_pasid_free(unsigned int pasid);
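A short usage sketch for the PASID allocator; as create_process() in kfd_process.c shows, a return value of 0 signals exhaustion:

	unsigned int pasid = kfd_pasid_alloc();

	if (pasid == 0)
		return -ENOMEM;
	/* ... bind the pasid to a device and use it ... */
	kfd_pasid_free(pasid);
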
+
+/* Doorbells */
+void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+                                       unsigned int *doorbell_off);
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
+u32 read_kernel_doorbell(u32 __iomem *db);
+void write_kernel_doorbell(u32 __iomem *db, u32 value);
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+                                       struct kfd_process *process,
+                                       unsigned int queue_id);
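A minimal sketch of the kernel-doorbell flow these declarations imply; new_wptr is a hypothetical write-pointer value and error handling is elided:

	unsigned int doorbell_off;
	u32 __iomem *db;

	db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
	if (db) {
		write_kernel_doorbell(db, new_wptr);	/* ring the queue */
		kfd_release_kernel_doorbell(kfd, db);
	}
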
+
+extern struct device *kfd_device;
+
+/* Topology */
+int kfd_topology_init(void);
+void kfd_topology_shutdown(void);
+int kfd_topology_add_device(struct kfd_dev *gpu);
+int kfd_topology_remove_device(struct kfd_dev *gpu);
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
+
+/* Interrupts */
+int kfd_interrupt_init(struct kfd_dev *dev);
+void kfd_interrupt_exit(struct kfd_dev *dev);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
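A sketch of the top-half flow these declarations suggest: copy the entry of interest into the SW ring, then defer processing to interrupt_work (schedule_work() stands in for whatever the driver actually uses):

	if (enqueue_ih_ring_entry(kfd, ih_ring_entry))
		schedule_work(&kfd->interrupt_work);
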
+
+/* Power Management */
+void kgd2kfd_suspend(struct kfd_dev *kfd);
+int kgd2kfd_resume(struct kfd_dev *kfd);
+
+/* amdkfd Apertures */
+int kfd_init_apertures(struct kfd_process *process);
+
+/* Queue Context Management */
+inline uint32_t lower_32(uint64_t x);
+inline uint32_t upper_32(uint64_t x);
+
+int init_queue(struct queue **q, struct queue_properties properties);
+void uninit_queue(struct queue *q);
+void print_queue_properties(struct queue_properties *q);
+void print_queue(struct queue *q);
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+                                       struct kfd_dev *dev);
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
+void device_queue_manager_uninit(struct device_queue_manager *dqm);
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+                                       enum kfd_queue_type type);
+void kernel_queue_uninit(struct kernel_queue *kq);
+
+/* Process Queue Manager */
+struct process_queue_node {
+       struct queue *q;
+       struct kernel_queue *kq;
+       struct list_head process_queue_list;
+};
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
+void pqm_uninit(struct process_queue_manager *pqm);
+int pqm_create_queue(struct process_queue_manager *pqm,
+                           struct kfd_dev *dev,
+                           struct file *f,
+                           struct queue_properties *properties,
+                           unsigned int flags,
+                           enum kfd_queue_type type,
+                           unsigned int *qid);
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+                       struct queue_properties *p);
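For orientation, a hedged sketch of the create/destroy cycle an ioctl handler might drive through these entry points; p, dev, filp and props are assumed to be set up by the caller:

	unsigned int qid;
	int err;

	err = pqm_create_queue(&p->pqm, dev, filp, &props, 0,
			       KFD_QUEUE_TYPE_COMPUTE, &qid);
	if (err)
		return err;
	/* ... the queue is live; eventually: ... */
	pqm_destroy_queue(&p->pqm, qid);
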
+
+/* Packet Manager */
+
+#define KFD_HIQ_TIMEOUT (500)
+
+#define KFD_FENCE_COMPLETED (100)
+#define KFD_FENCE_INIT   (10)
+#define KFD_UNMAP_LATENCY (150)
+
+struct packet_manager {
+       struct device_queue_manager *dqm;
+       struct kernel_queue *priv_queue;
+       struct mutex lock;
+       bool allocated;
+       struct kfd_mem_obj *ib_buffer_obj;
+};
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+void pm_uninit(struct packet_manager *pm);
+int pm_send_set_resources(struct packet_manager *pm,
+                               struct scheduling_resources *res);
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+                               uint32_t fence_value);
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+                       enum kfd_preempt_type_filter mode,
+                       uint32_t filter_param, bool reset,
+                       unsigned int sdma_engine);
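Combined with the preempt filters defined earlier, unmapping every queue of one process might look like this sketch (pm and p assumed to exist):

	err = pm_send_unmap_queue(pm, KFD_QUEUE_TYPE_COMPUTE,
				  KFD_PREEMPT_TYPE_FILTER_BY_PASID,
				  p->pasid, false, 0);
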
+
+void pm_release_ib(struct packet_manager *pm);
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+                                       struct kfd_process *process);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
new file mode 100644 (file)
index 0000000..b85eb0b
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+struct mm_struct;
+
+#include "kfd_priv.h"
+
+/*
+ * Initial size for the array of queues.
+ * The allocated size is doubled each time
+ * it is exceeded, up to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS.
+ */
+#define INITIAL_QUEUE_ARRAY_SIZE 16
+
+/*
+ * List of struct kfd_process (field kfd_process).
+ * Unique/indexed by mm_struct*
+ */
+#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
+static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
+static DEFINE_MUTEX(kfd_processes_mutex);
+
+DEFINE_STATIC_SRCU(kfd_processes_srcu);
+
+static struct workqueue_struct *kfd_process_wq;
+
+struct kfd_process_release_work {
+       struct work_struct kfd_work;
+       struct kfd_process *p;
+};
+
+static struct kfd_process *find_process(const struct task_struct *thread);
+static struct kfd_process *create_process(const struct task_struct *thread);
+
+void kfd_process_create_wq(void)
+{
+       if (!kfd_process_wq)
+               kfd_process_wq = create_workqueue("kfd_process_wq");
+}
+
+void kfd_process_destroy_wq(void)
+{
+       if (kfd_process_wq) {
+               flush_workqueue(kfd_process_wq);
+               destroy_workqueue(kfd_process_wq);
+               kfd_process_wq = NULL;
+       }
+}
+
+struct kfd_process *kfd_create_process(const struct task_struct *thread)
+{
+       struct kfd_process *process;
+
+       BUG_ON(!kfd_process_wq);
+
+       if (thread->mm == NULL)
+               return ERR_PTR(-EINVAL);
+
+       /* Only the pthreads threading model is supported. */
+       if (thread->group_leader->mm != thread->mm)
+               return ERR_PTR(-EINVAL);
+
+       /* Take mmap_sem because we call __mmu_notifier_register inside */
+       down_write(&thread->mm->mmap_sem);
+
+       /*
+        * Take the kfd processes mutex before starting process creation
+        * so there won't be a case where two threads of the same process
+        * create two kfd_process structures.
+        */
+       mutex_lock(&kfd_processes_mutex);
+
+       /* A prior open of /dev/kfd could have already created the process. */
+       process = find_process(thread);
+       if (process)
+               pr_debug("kfd: process already found\n");
+
+       if (!process)
+               process = create_process(thread);
+
+       mutex_unlock(&kfd_processes_mutex);
+
+       up_write(&thread->mm->mmap_sem);
+
+       return process;
+}
+
+struct kfd_process *kfd_get_process(const struct task_struct *thread)
+{
+       struct kfd_process *process;
+
+       if (thread->mm == NULL)
+               return ERR_PTR(-EINVAL);
+
+       /* Only the pthreads threading model is supported. */
+       if (thread->group_leader->mm != thread->mm)
+               return ERR_PTR(-EINVAL);
+
+       process = find_process(thread);
+
+       return process;
+}
+
+static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
+{
+       struct kfd_process *process;
+
+       hash_for_each_possible_rcu(kfd_processes_table, process,
+                                       kfd_processes, (uintptr_t)mm)
+               if (process->mm == mm)
+                       return process;
+
+       return NULL;
+}
+
+static struct kfd_process *find_process(const struct task_struct *thread)
+{
+       struct kfd_process *p;
+       int idx;
+
+       idx = srcu_read_lock(&kfd_processes_srcu);
+       p = find_process_by_mm(thread->mm);
+       srcu_read_unlock(&kfd_processes_srcu, idx);
+
+       return p;
+}
+
+static void kfd_process_wq_release(struct work_struct *work)
+{
+       struct kfd_process_release_work *my_work;
+       struct kfd_process_device *pdd, *temp;
+       struct kfd_process *p;
+
+       my_work = (struct kfd_process_release_work *) work;
+
+       p = my_work->p;
+
+       mutex_lock(&p->mutex);
+
+       list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+                                                       per_device_list) {
+               amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+               list_del(&pdd->per_device_list);
+
+               kfree(pdd);
+       }
+
+       kfd_pasid_free(p->pasid);
+
+       mutex_unlock(&p->mutex);
+
+       mutex_destroy(&p->mutex);
+
+       kfree(p->queues);
+
+       kfree(p);
+
+       kfree((void *)work);
+}
+
+static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+{
+       struct kfd_process_release_work *work;
+       struct kfd_process *p;
+
+       BUG_ON(!kfd_process_wq);
+
+       p = container_of(rcu, struct kfd_process, rcu);
+       BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+
+       mmdrop(p->mm);
+
+       work = (struct kfd_process_release_work *)
+               kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+
+       if (work) {
+               INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
+               work->p = p;
+               queue_work(kfd_process_wq, (struct work_struct *) work);
+       }
+}
+
+static void kfd_process_notifier_release(struct mmu_notifier *mn,
+                                       struct mm_struct *mm)
+{
+       struct kfd_process *p;
+
+       /*
+        * The kfd_process structure cannot be freed because the
+        * mmu_notifier srcu is read-locked.
+        */
+       p = container_of(mn, struct kfd_process, mmu_notifier);
+       BUG_ON(p->mm != mm);
+
+       mutex_lock(&kfd_processes_mutex);
+       hash_del_rcu(&p->kfd_processes);
+       mutex_unlock(&kfd_processes_mutex);
+       synchronize_srcu(&kfd_processes_srcu);
+
+       mutex_lock(&p->mutex);
+
+       /* In case our notifier is called before IOMMU notifier */
+       pqm_uninit(&p->pqm);
+
+       mutex_unlock(&p->mutex);
+
+       /*
+        * Because we drop mm_count inside kfd_process_destroy_delayed
+        * and because the mmu_notifier_unregister function also drops
+        * mm_count, we need to take an extra count here.
+        */
+       atomic_inc(&p->mm->mm_count);
+       mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
+       mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
+}
+
+static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+       .release = kfd_process_notifier_release,
+};
+
+static struct kfd_process *create_process(const struct task_struct *thread)
+{
+       struct kfd_process *process;
+       int err = -ENOMEM;
+
+       process = kzalloc(sizeof(*process), GFP_KERNEL);
+
+       if (!process)
+               goto err_alloc_process;
+
+       process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
+                                       sizeof(process->queues[0]), GFP_KERNEL);
+       if (!process->queues)
+               goto err_alloc_queues;
+
+       process->pasid = kfd_pasid_alloc();
+       if (process->pasid == 0)
+               goto err_alloc_pasid;
+
+       mutex_init(&process->mutex);
+
+       process->mm = thread->mm;
+
+       /* register notifier */
+       process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
+       err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
+       if (err)
+               goto err_mmu_notifier;
+
+       hash_add_rcu(kfd_processes_table, &process->kfd_processes,
+                       (uintptr_t)process->mm);
+
+       process->lead_thread = thread->group_leader;
+
+       process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
+
+       INIT_LIST_HEAD(&process->per_device_data);
+
+       err = pqm_init(&process->pqm, process);
+       if (err != 0)
+               goto err_process_pqm_init;
+
+       return process;
+
+err_process_pqm_init:
+       hash_del_rcu(&process->kfd_processes);
+       synchronize_rcu();
+       mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
+err_mmu_notifier:
+       kfd_pasid_free(process->pasid);
+err_alloc_pasid:
+       kfree(process->queues);
+err_alloc_queues:
+       kfree(process);
+err_alloc_process:
+       return ERR_PTR(err);
+}
+
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+                                                       struct kfd_process *p,
+                                                       int create_pdd)
+{
+       struct kfd_process_device *pdd = NULL;
+
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+               if (pdd->dev == dev)
+                       return pdd;
+
+       if (create_pdd) {
+               pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
+               if (pdd != NULL) {
+                       pdd->dev = dev;
+                       INIT_LIST_HEAD(&pdd->qpd.queues_list);
+                       INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+                       pdd->qpd.dqm = dev->dqm;
+                       list_add(&pdd->per_device_list, &p->per_device_data);
+               }
+       }
+
+       return pdd;
+}
+
+/*
+ * Direct the IOMMU to bind the process (specifically the pasid->mm)
+ * to the device.
+ * Unbinding occurs when the process dies or the device is removed.
+ *
+ * Assumes that the process lock is held.
+ */
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+                                                       struct kfd_process *p)
+{
+       struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
+       int err;
+
+       if (pdd == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       if (pdd->bound)
+               return pdd;
+
+       err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       pdd->bound = true;
+
+       return pdd;
+}
+
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+{
+       struct kfd_process *p;
+       struct kfd_process_device *pdd;
+       int idx, i;
+
+       BUG_ON(dev == NULL);
+
+       idx = srcu_read_lock(&kfd_processes_srcu);
+
+       hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
+               if (p->pasid == pasid)
+                       break;
+
+       srcu_read_unlock(&kfd_processes_srcu, idx);
+
+       BUG_ON(p->pasid != pasid);
+
+       mutex_lock(&p->mutex);
+
+       pqm_uninit(&p->pqm);
+
+       pdd = kfd_get_process_device_data(dev, p, 0);
+
+       /*
+        * Just mark pdd as unbound, because we still need it to call
+        * amd_iommu_unbind_pasid() when the process exits.
+        * We don't call amd_iommu_unbind_pasid() here
+        * because the IOMMU called us.
+        */
+       if (pdd)
+               pdd->bound = false;
+
+       mutex_unlock(&p->mutex);
+}
+
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+{
+       return list_first_entry(&p->per_device_data,
+                               struct kfd_process_device,
+                               per_device_list);
+}
+
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+                                               struct kfd_process_device *pdd)
+{
+       if (list_is_last(&pdd->per_device_list, &p->per_device_data))
+               return NULL;
+       return list_next_entry(pdd, per_device_list);
+}
+
+bool kfd_has_process_device_data(struct kfd_process *p)
+{
+       return !(list_empty(&p->per_device_data));
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
new file mode 100644 (file)
index 0000000..4752678
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_priv.h"
+#include "kfd_kernel_queue.h"
+
+static inline struct process_queue_node *get_queue_by_qid(
+                       struct process_queue_manager *pqm, unsigned int qid)
+{
+       struct process_queue_node *pqn;
+
+       BUG_ON(!pqm);
+
+       list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+               if (pqn->q && pqn->q->properties.queue_id == qid)
+                       return pqn;
+               if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+                       return pqn;
+       }
+
+       return NULL;
+}
+
+static int find_available_queue_slot(struct process_queue_manager *pqm,
+                                       unsigned int *qid)
+{
+       unsigned long found;
+
+       BUG_ON(!pqm || !qid);
+
+       pr_debug("kfd: in %s\n", __func__);
+
+       found = find_first_zero_bit(pqm->queue_slot_bitmap,
+                       max_num_of_queues_per_process);
+
+       pr_debug("kfd: the new slot id %lu\n", found);
+
+       if (found >= max_num_of_queues_per_process) {
+               pr_info("amdkfd: Cannot open more queues for process with pasid %d\n",
+                               pqm->process->pasid);
+               return -ENOMEM;
+       }
+
+       set_bit(found, pqm->queue_slot_bitmap);
+       *qid = found;
+
+       return 0;
+}
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+{
+       BUG_ON(!pqm);
+
+       INIT_LIST_HEAD(&pqm->queues);
+       pqm->queue_slot_bitmap =
+                       kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+                                       BITS_PER_BYTE), GFP_KERNEL);
+       if (pqm->queue_slot_bitmap == NULL)
+               return -ENOMEM;
+       pqm->process = p;
+
+       return 0;
+}
+
+void pqm_uninit(struct process_queue_manager *pqm)
+{
+       int retval;
+       struct process_queue_node *pqn, *next;
+
+       BUG_ON(!pqm);
+
+       pr_debug("In func %s\n", __func__);
+
+       list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+               retval = pqm_destroy_queue(
+                               pqm,
+                               (pqn->q != NULL) ?
+                                       pqn->q->properties.queue_id :
+                                       pqn->kq->queue->properties.queue_id);
+
+               if (retval != 0) {
+                       pr_err("kfd: failed to destroy queue\n");
+                       return;
+               }
+       }
+       kfree(pqm->queue_slot_bitmap);
+       pqm->queue_slot_bitmap = NULL;
+}
+
+static int create_cp_queue(struct process_queue_manager *pqm,
+                               struct kfd_dev *dev, struct queue **q,
+                               struct queue_properties *q_properties,
+                               struct file *f, unsigned int qid)
+{
+       int retval;
+
+       retval = 0;
+
+       /* Doorbell initialized in user space */
+       q_properties->doorbell_ptr = NULL;
+
+       q_properties->doorbell_off =
+                       kfd_queue_id_to_doorbell(dev, pqm->process, qid);
+
+       /* Let DQM handle it */
+       q_properties->vmid = 0;
+       q_properties->queue_id = qid;
+       q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+
+       retval = init_queue(q, *q_properties);
+       if (retval != 0)
+               goto err_init_queue;
+
+       (*q)->device = dev;
+       (*q)->process = pqm->process;
+
+       pr_debug("kfd: PQM After init queue\n");
+
+       return retval;
+
+err_init_queue:
+       return retval;
+}
+
+int pqm_create_queue(struct process_queue_manager *pqm,
+                           struct kfd_dev *dev,
+                           struct file *f,
+                           struct queue_properties *properties,
+                           unsigned int flags,
+                           enum kfd_queue_type type,
+                           unsigned int *qid)
+{
+       int retval;
+       struct kfd_process_device *pdd;
+       struct queue_properties q_properties;
+       struct queue *q;
+       struct process_queue_node *pqn;
+       struct kernel_queue *kq;
+
+       BUG_ON(!pqm || !dev || !properties || !qid);
+
+       memset(&q_properties, 0, sizeof(struct queue_properties));
+       memcpy(&q_properties, properties, sizeof(struct queue_properties));
+       q = NULL;
+       kq = NULL;
+
+       pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+       BUG_ON(!pdd);
+
+       retval = find_available_queue_slot(pqm, qid);
+       if (retval != 0)
+               return retval;
+
+       if (list_empty(&pqm->queues)) {
+               pdd->qpd.pqm = pqm;
+               dev->dqm->register_process(dev->dqm, &pdd->qpd);
+       }
+
+       pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+       if (!pqn) {
+               retval = -ENOMEM;
+               goto err_allocate_pqn;
+       }
+
+       switch (type) {
+       case KFD_QUEUE_TYPE_COMPUTE:
+               /* check if there is over subscription */
+               if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+               ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+               (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
+                       pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+                       retval = -EPERM;
+                       goto err_create_queue;
+               }
+
+               retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+               if (retval != 0)
+                       goto err_create_queue;
+               pqn->q = q;
+               pqn->kq = NULL;
+               retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+                                               &q->properties.vmid);
+               print_queue(q);
+               break;
+       case KFD_QUEUE_TYPE_DIQ:
+               kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
+               if (kq == NULL) {
+                       retval = -ENOMEM;
+                       goto err_create_queue;
+               }
+               kq->queue->properties.queue_id = *qid;
+               pqn->kq = kq;
+               pqn->q = NULL;
+               retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       if (retval != 0) {
+               pr_err("kfd: error dqm create queue\n");
+               goto err_create_queue;
+       }
+
+       pr_debug("kfd: PQM After DQM create queue\n");
+
+       list_add(&pqn->process_queue_list, &pqm->queues);
+
+       if (q) {
+               *properties = q->properties;
+               pr_debug("kfd: PQM done creating queue\n");
+               print_queue_properties(properties);
+       }
+
+       return retval;
+
+err_create_queue:
+       kfree(pqn);
+err_allocate_pqn:
+       clear_bit(*qid, pqm->queue_slot_bitmap);
+       return retval;
+}
+
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+{
+       struct process_queue_node *pqn;
+       struct kfd_process_device *pdd;
+       struct device_queue_manager *dqm;
+       struct kfd_dev *dev;
+       int retval;
+
+       dqm = NULL;
+
+       BUG_ON(!pqm);
+       retval = 0;
+
+       pr_debug("kfd: In Func %s\n", __func__);
+
+       pqn = get_queue_by_qid(pqm, qid);
+       if (pqn == NULL) {
+               pr_err("kfd: queue id does not match any known queue\n");
+               return -EINVAL;
+       }
+
+       dev = NULL;
+       if (pqn->kq)
+               dev = pqn->kq->dev;
+       if (pqn->q)
+               dev = pqn->q->device;
+       BUG_ON(!dev);
+
+       pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+       BUG_ON(!pdd);
+
+       if (pqn->kq) {
+               /* destroy kernel queue (DIQ) */
+               dqm = pqn->kq->dev->dqm;
+               dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+               kernel_queue_uninit(pqn->kq);
+       }
+
+       if (pqn->q) {
+               dqm = pqn->q->device->dqm;
+               retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+               if (retval != 0)
+                       return retval;
+
+               uninit_queue(pqn->q);
+       }
+
+       list_del(&pqn->process_queue_list);
+       kfree(pqn);
+       clear_bit(qid, pqm->queue_slot_bitmap);
+
+       if (list_empty(&pqm->queues))
+               dqm->unregister_process(dqm, &pdd->qpd);
+
+       return retval;
+}
+
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+                       struct queue_properties *p)
+{
+       int retval;
+       struct process_queue_node *pqn;
+
+       BUG_ON(!pqm);
+
+       pqn = get_queue_by_qid(pqm, qid);
+       BUG_ON(!pqn);
+
+       pqn->q->properties.queue_address = p->queue_address;
+       pqn->q->properties.queue_size = p->queue_size;
+       pqn->q->properties.queue_percent = p->queue_percent;
+       pqn->q->properties.priority = p->priority;
+
+       retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+       if (retval != 0)
+               return retval;
+
+       return 0;
+}
+
+static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+                                       struct process_queue_manager *pqm,
+                                       unsigned int qid)
+{
+       struct process_queue_node *pqn;
+
+       BUG_ON(!pqm);
+
+       pqn = get_queue_by_qid(pqm, qid);
+       if (pqn && pqn->kq)
+               return pqn->kq;
+
+       return NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
new file mode 100644 (file)
index 0000000..9a0c90b
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "kfd_priv.h"
+
+void print_queue_properties(struct queue_properties *q)
+{
+       if (!q)
+               return;
+
+       pr_debug("Printing queue properties:\n");
+       pr_debug("Queue Type: %u\n", q->type);
+       pr_debug("Queue Size: %llu\n", q->queue_size);
+       pr_debug("Queue percent: %u\n", q->queue_percent);
+       pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+       pr_debug("Queue Id: %u\n", q->queue_id);
+       pr_debug("Queue Process Vmid: %u\n", q->vmid);
+       pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
+       pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
+       pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+       pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+}
+
+void print_queue(struct queue *q)
+{
+       if (!q)
+               return;
+       pr_debug("Printing queue:\n");
+       pr_debug("Queue Type: %u\n", q->properties.type);
+       pr_debug("Queue Size: %llu\n", q->properties.queue_size);
+       pr_debug("Queue percent: %u\n", q->properties.queue_percent);
+       pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+       pr_debug("Queue Id: %u\n", q->properties.queue_id);
+       pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+       pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
+       pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
+       pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+       pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+       pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+       pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
+       pr_debug("Queue Process Address: 0x%p\n", q->process);
+       pr_debug("Queue Device Address: 0x%p\n", q->device);
+}
+
+int init_queue(struct queue **q, struct queue_properties properties)
+{
+       struct queue *tmp;
+
+       BUG_ON(!q);
+
+       tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+
+       *q = tmp;
+       return 0;
+}
+
+void uninit_queue(struct queue *q)
+{
+       kfree(q);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
new file mode 100644 (file)
index 0000000..5733e28
--- /dev/null
@@ -0,0 +1,1235 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/hash.h>
+#include <linux/cpufreq.h>
+
+#include "kfd_priv.h"
+#include "kfd_crat.h"
+#include "kfd_topology.h"
+
+static struct list_head topology_device_list;
+static int topology_crat_parsed;
+static struct kfd_system_properties sys_props;
+
+static DECLARE_RWSEM(topology_lock);
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+       struct kfd_topology_device *top_dev;
+       struct kfd_dev *device = NULL;
+
+       down_read(&topology_lock);
+
+       list_for_each_entry(top_dev, &topology_device_list, list)
+               if (top_dev->gpu_id == gpu_id) {
+                       device = top_dev->gpu;
+                       break;
+               }
+
+       up_read(&topology_lock);
+
+       return device;
+}
+
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+{
+       struct kfd_topology_device *top_dev;
+       struct kfd_dev *device = NULL;
+
+       down_read(&topology_lock);
+
+       list_for_each_entry(top_dev, &topology_device_list, list)
+               if (top_dev->gpu->pdev == pdev) {
+                       device = top_dev->gpu;
+                       break;
+               }
+
+       up_read(&topology_lock);
+
+       return device;
+}
+
+static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
+{
+       struct acpi_table_header *crat_table;
+       acpi_status status;
+
+       if (!size)
+               return -EINVAL;
+
+       /*
+        * Fetch the CRAT table from ACPI
+        */
+       status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+       if (status == AE_NOT_FOUND) {
+               pr_warn("CRAT table not found\n");
+               return -ENODATA;
+       } else if (ACPI_FAILURE(status)) {
+               const char *err = acpi_format_exception(status);
+
+               pr_err("CRAT table error: %s\n", err);
+               return -EINVAL;
+       }
+
+       if (*size >= crat_table->length && crat_image != NULL)
+               memcpy(crat_image, crat_table, crat_table->length);
+
+       *size = crat_table->length;
+
+       return 0;
+}
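This follows the usual two-call ACPI pattern: query the length first, then fetch into an allocated buffer. A sketch of the intended caller:

	size_t size = 0;
	void *crat;

	if (kfd_topology_get_crat_acpi(NULL, &size) != 0)	/* query length */
		return -ENODATA;
	crat = kzalloc(size, GFP_KERNEL);
	if (!crat)
		return -ENOMEM;
	kfd_topology_get_crat_acpi(crat, &size);		/* copy table */
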
+
+static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+               struct crat_subtype_computeunit *cu)
+{
+       BUG_ON(!dev);
+       BUG_ON(!cu);
+
+       dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+       dev->node_props.cpu_core_id_base = cu->processor_id_low;
+       if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
+               dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+       pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
+                       cu->processor_id_low);
+}
+
+static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+               struct crat_subtype_computeunit *cu)
+{
+       BUG_ON(!dev);
+       BUG_ON(!cu);
+
+       dev->node_props.simd_id_base = cu->processor_id_low;
+       dev->node_props.simd_count = cu->num_simd_cores;
+       dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+       dev->node_props.max_waves_per_simd = cu->max_waves_simd;
+       dev->node_props.wave_front_size = cu->wave_front_size;
+       dev->node_props.mem_banks_count = cu->num_banks;
+       dev->node_props.array_count = cu->num_arrays;
+       dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
+       dev->node_props.simd_per_cu = cu->num_simd_per_cu;
+       dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
+       if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
+               dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
+       pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
+                               cu->processor_id_low);
+}
+
+/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
+static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
+{
+       struct kfd_topology_device *dev;
+       int i = 0;
+
+       BUG_ON(!cu);
+
+       pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+                       cu->proximity_domain, cu->hsa_capability);
+       list_for_each_entry(dev, &topology_device_list, list) {
+               if (cu->proximity_domain == i) {
+                       if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
+                               kfd_populated_cu_info_cpu(dev, cu);
+
+                       if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
+                               kfd_populated_cu_info_gpu(dev, cu);
+                       break;
+               }
+               i++;
+       }
+
+       return 0;
+}
+
+/*
+ * kfd_parse_subtype_mem is called when the topology mutex is
+ * already acquired
+ */
+static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
+{
+       struct kfd_mem_properties *props;
+       struct kfd_topology_device *dev;
+       int i = 0;
+
+       BUG_ON(!mem);
+
+       pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
+                       mem->promixity_domain);
+       list_for_each_entry(dev, &topology_device_list, list) {
+               if (mem->promixity_domain == i) {
+                       props = kfd_alloc_struct(props);
+                       if (props == NULL)
+                               return -ENOMEM;
+
+                       if (dev->node_props.cpu_cores_count == 0)
+                               props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
+                       else
+                               props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+
+                       if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
+                               props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+                       if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
+                               props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+
+                       props->size_in_bytes =
+                               ((uint64_t)mem->length_high << 32) +
+                                                       mem->length_low;
+                       props->width = mem->width;
+
+                       dev->mem_bank_count++;
+                       list_add_tail(&props->list, &dev->mem_props);
+
+                       break;
+               }
+               i++;
+       }
+
+       return 0;
+}
+
+/*
+ * kfd_parse_subtype_cache is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
+{
+       struct kfd_cache_properties *props;
+       struct kfd_topology_device *dev;
+       uint32_t id;
+
+       BUG_ON(!cache);
+
+       id = cache->processor_id_low;
+
+       pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
+       list_for_each_entry(dev, &topology_device_list, list)
+               if (id == dev->node_props.cpu_core_id_base ||
+                   id == dev->node_props.simd_id_base) {
+                       props = kfd_alloc_struct(props);
+                       if (props == NULL)
+                               return -ENOMEM;
+
+                       props->processor_id_low = id;
+                       props->cache_level = cache->cache_level;
+                       props->cache_size = cache->cache_size;
+                       props->cacheline_size = cache->cache_line_size;
+                       props->cachelines_per_tag = cache->lines_per_tag;
+                       props->cache_assoc = cache->associativity;
+                       props->cache_latency = cache->cache_latency;
+
+                       if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+                               props->cache_type |= HSA_CACHE_TYPE_DATA;
+                       if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
+                               props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
+                       if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
+                               props->cache_type |= HSA_CACHE_TYPE_CPU;
+                       if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
+                               props->cache_type |= HSA_CACHE_TYPE_HSACU;
+
+                       dev->cache_count++;
+                       dev->node_props.caches_count++;
+                       list_add_tail(&props->list, &dev->cache_props);
+
+                       break;
+               }
+
+       return 0;
+}
+
+/*
+ * kfd_parse_subtype_iolink is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
+{
+       struct kfd_iolink_properties *props;
+       struct kfd_topology_device *dev;
+       uint32_t i = 0;
+       uint32_t id_from;
+       uint32_t id_to;
+
+       BUG_ON(!iolink);
+
+       id_from = iolink->proximity_domain_from;
+       id_to = iolink->proximity_domain_to;
+
+       pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
+       list_for_each_entry(dev, &topology_device_list, list) {
+               if (id_from == i) {
+                       props = kfd_alloc_struct(props);
+                       if (props == NULL)
+                               return -ENOMEM;
+
+                       props->node_from = id_from;
+                       props->node_to = id_to;
+                       props->ver_maj = iolink->version_major;
+                       props->ver_min = iolink->version_minor;
+
+                       /*
+                        * weight factor (derived from CDIR), currently always 1
+                        */
+                       props->weight = 1;
+
+                       props->min_latency = iolink->minimum_latency;
+                       props->max_latency = iolink->maximum_latency;
+                       props->min_bandwidth = iolink->minimum_bandwidth_mbs;
+                       props->max_bandwidth = iolink->maximum_bandwidth_mbs;
+                       props->rec_transfer_size =
+                                       iolink->recommended_transfer_size;
+
+                       dev->io_link_count++;
+                       dev->node_props.io_links_count++;
+                       list_add_tail(&props->list, &dev->io_link_props);
+
+                       break;
+               }
+               i++;
+       }
+
+       return 0;
+}
+
+static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
+{
+       struct crat_subtype_computeunit *cu;
+       struct crat_subtype_memory *mem;
+       struct crat_subtype_cache *cache;
+       struct crat_subtype_iolink *iolink;
+       int ret = 0;
+
+       BUG_ON(!sub_type_hdr);
+
+       switch (sub_type_hdr->type) {
+       case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+               cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+               ret = kfd_parse_subtype_cu(cu);
+               break;
+       case CRAT_SUBTYPE_MEMORY_AFFINITY:
+               mem = (struct crat_subtype_memory *)sub_type_hdr;
+               ret = kfd_parse_subtype_mem(mem);
+               break;
+       case CRAT_SUBTYPE_CACHE_AFFINITY:
+               cache = (struct crat_subtype_cache *)sub_type_hdr;
+               ret = kfd_parse_subtype_cache(cache);
+               break;
+       case CRAT_SUBTYPE_TLB_AFFINITY:
+               /*
+                * For now, nothing to do here
+                */
+               pr_info("Found TLB entry in CRAT table (not processing)\n");
+               break;
+       case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+               /*
+                * For now, nothing to do here
+                */
+               pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
+               break;
+       case CRAT_SUBTYPE_IOLINK_AFFINITY:
+               iolink = (struct crat_subtype_iolink *)sub_type_hdr;
+               ret = kfd_parse_subtype_iolink(iolink);
+               break;
+       default:
+               pr_warn("Unknown subtype (%d) in CRAT\n",
+                               sub_type_hdr->type);
+       }
+
+       return ret;
+}
+
+static void kfd_release_topology_device(struct kfd_topology_device *dev)
+{
+       struct kfd_mem_properties *mem;
+       struct kfd_cache_properties *cache;
+       struct kfd_iolink_properties *iolink;
+
+       BUG_ON(!dev);
+
+       list_del(&dev->list);
+
+       while (dev->mem_props.next != &dev->mem_props) {
+               mem = container_of(dev->mem_props.next,
+                               struct kfd_mem_properties, list);
+               list_del(&mem->list);
+               kfree(mem);
+       }
+
+       while (dev->cache_props.next != &dev->cache_props) {
+               cache = container_of(dev->cache_props.next,
+                               struct kfd_cache_properties, list);
+               list_del(&cache->list);
+               kfree(cache);
+       }
+
+       while (dev->io_link_props.next != &dev->io_link_props) {
+               iolink = container_of(dev->io_link_props.next,
+                               struct kfd_iolink_properties, list);
+               list_del(&iolink->list);
+               kfree(iolink);
+       }
+
+       kfree(dev);
+
+       sys_props.num_devices--;
+}
+
+static void kfd_release_live_view(void)
+{
+       struct kfd_topology_device *dev;
+
+       while (topology_device_list.next != &topology_device_list) {
+               dev = container_of(topology_device_list.next,
+                                struct kfd_topology_device, list);
+               kfd_release_topology_device(dev);
+       }
+
+       memset(&sys_props, 0, sizeof(sys_props));
+}
+
+static struct kfd_topology_device *kfd_create_topology_device(void)
+{
+       struct kfd_topology_device *dev;
+
+       dev = kfd_alloc_struct(dev);
+       if (dev == NULL) {
+               pr_err("No memory to allocate a topology device\n");
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&dev->mem_props);
+       INIT_LIST_HEAD(&dev->cache_props);
+       INIT_LIST_HEAD(&dev->io_link_props);
+
+       list_add_tail(&dev->list, &topology_device_list);
+       sys_props.num_devices++;
+
+       return dev;
+}
+
+static int kfd_parse_crat_table(void *crat_image)
+{
+       struct kfd_topology_device *top_dev;
+       struct crat_subtype_generic *sub_type_hdr;
+       uint16_t node_id;
+       int ret;
+       struct crat_header *crat_table = (struct crat_header *)crat_image;
+       uint16_t num_nodes;
+       uint32_t image_len;
+
+       if (!crat_image)
+               return -EINVAL;
+
+       num_nodes = crat_table->num_domains;
+       image_len = crat_table->length;
+
+       pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+
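+       /* Reserve one empty topology device per CRAT domain up front. */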
+       for (node_id = 0; node_id < num_nodes; node_id++) {
+               top_dev = kfd_create_topology_device();
+               if (!top_dev) {
+                       kfd_release_live_view();
+                       return -ENOMEM;
+               }
+       }
+
+       sys_props.platform_id =
+               (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
+       sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
+       sys_props.platform_rev = crat_table->revision;
+
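+       /* Walk the variable-length subtype records that follow the fixed header. */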
+       sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+       while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
+                       ((char *)crat_image) + image_len) {
+               if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
+                       ret = kfd_parse_subtype(sub_type_hdr);
+                       if (ret != 0) {
+                               kfd_release_live_view();
+                               return ret;
+                       }
+               }
+
+               sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+                               sub_type_hdr->length);
+       }
+
+       sys_props.generation_count++;
+       topology_crat_parsed = 1;
+
+       return 0;
+}
+
+
+#define sysfs_show_gen_prop(buffer, fmt, ...) \
+               snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+#define sysfs_show_32bit_prop(buffer, name, value) \
+               sysfs_show_gen_prop(buffer, "%s %u\n", name, value)
+#define sysfs_show_64bit_prop(buffer, name, value) \
+               sysfs_show_gen_prop(buffer, "%s %llu\n", name, value)
+#define sysfs_show_32bit_val(buffer, value) \
+               sysfs_show_gen_prop(buffer, "%u\n", value)
+#define sysfs_show_str_val(buffer, value) \
+               sysfs_show_gen_prop(buffer, "%s\n", value)
+
+static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
+               char *buffer)
+{
+       ssize_t ret;
+
+       /* Making sure that the buffer is an empty string */
+       buffer[0] = 0;
+
+       if (attr == &sys_props.attr_genid) {
+               ret = sysfs_show_32bit_val(buffer, sys_props.generation_count);
+       } else if (attr == &sys_props.attr_props) {
+               sysfs_show_64bit_prop(buffer, "platform_oem",
+                               sys_props.platform_oem);
+               sysfs_show_64bit_prop(buffer, "platform_id",
+                               sys_props.platform_id);
+               ret = sysfs_show_64bit_prop(buffer, "platform_rev",
+                               sys_props.platform_rev);
+       } else {
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static const struct sysfs_ops sysprops_ops = {
+       .show = sysprops_show,
+};
+
+static struct kobj_type sysprops_type = {
+       .sysfs_ops = &sysprops_ops,
+};
+
+static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
+               char *buffer)
+{
+       ssize_t ret;
+       struct kfd_iolink_properties *iolink;
+
+       /* Making sure that the buffer is an empty string */
+       buffer[0] = 0;
+
+       iolink = container_of(attr, struct kfd_iolink_properties, attr);
+       sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
+       sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
+       sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
+       sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from);
+       sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to);
+       sysfs_show_32bit_prop(buffer, "weight", iolink->weight);
+       sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency);
+       sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency);
+       sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth);
+       sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth);
+       sysfs_show_32bit_prop(buffer, "recommended_transfer_size",
+                       iolink->rec_transfer_size);
+       ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags);
+
+       return ret;
+}
+
+static const struct sysfs_ops iolink_ops = {
+       .show = iolink_show,
+};
+
+static struct kobj_type iolink_type = {
+       .sysfs_ops = &iolink_ops,
+};
+
+static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
+               char *buffer)
+{
+       ssize_t ret;
+       struct kfd_mem_properties *mem;
+
+       /* Making sure that the buffer is an empty string */
+       buffer[0] = 0;
+
+       mem = container_of(attr, struct kfd_mem_properties, attr);
+       sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
+       sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
+       sysfs_show_32bit_prop(buffer, "flags", mem->flags);
+       sysfs_show_32bit_prop(buffer, "width", mem->width);
+       ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max);
+
+       return ret;
+}
+
+static const struct sysfs_ops mem_ops = {
+       .show = mem_show,
+};
+
+static struct kobj_type mem_type = {
+       .sysfs_ops = &mem_ops,
+};
+
+static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
+               char *buffer)
+{
+       ssize_t ret;
+       uint32_t i;
+       struct kfd_cache_properties *cache;
+
+       /* Making sure that the buffer is an empty string */
+       buffer[0] = 0;
+
+       cache = container_of(attr, struct kfd_cache_properties, attr);
+       sysfs_show_32bit_prop(buffer, "processor_id_low",
+                       cache->processor_id_low);
+       sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
+       sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
+       sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
+       sysfs_show_32bit_prop(buffer, "cache_lines_per_tag",
+                       cache->cachelines_per_tag);
+       sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
+       sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
+       sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
+       snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
+       for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
+               ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
+                               buffer, cache->sibling_map[i],
+                               (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
+                                               "\n" : ",");
+
+       return ret;
+}
+
+static const struct sysfs_ops cache_ops = {
+       .show = kfd_cache_show,
+};
+
+static struct kobj_type cache_type = {
+       .sysfs_ops = &cache_ops,
+};
+
+static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+               char *buffer)
+{
+       ssize_t ret;
+       struct kfd_topology_device *dev;
+       char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+       uint32_t i;
+
+       /* Making sure that the buffer is an empty string */
+       buffer[0] = 0;
+
+       if (strcmp(attr->name, "gpu_id") == 0) {
+               dev = container_of(attr, struct kfd_topology_device,
+                               attr_gpuid);
+               ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
+       } else if (strcmp(attr->name, "name") == 0) {
+               dev = container_of(attr, struct kfd_topology_device,
+                               attr_name);
+               for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
+                       public_name[i] =
+                                       (char)dev->node_props.marketing_name[i];
+                       if (dev->node_props.marketing_name[i] == 0)
+                               break;
+               }
+               public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
+               ret = sysfs_show_str_val(buffer, public_name);
+       } else {
+               dev = container_of(attr, struct kfd_topology_device,
+                               attr_props);
+               sysfs_show_32bit_prop(buffer, "cpu_cores_count",
+                               dev->node_props.cpu_cores_count);
+               sysfs_show_32bit_prop(buffer, "simd_count",
+                               dev->node_props.simd_count);
+
+               if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
+                       pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+                                       dev->node_props.mem_banks_count,
+                                       dev->mem_bank_count);
+                       sysfs_show_32bit_prop(buffer, "mem_banks_count",
+                                       dev->mem_bank_count);
+               } else {
+                       sysfs_show_32bit_prop(buffer, "mem_banks_count",
+                                       dev->node_props.mem_banks_count);
+               }
+
+               sysfs_show_32bit_prop(buffer, "caches_count",
+                               dev->node_props.caches_count);
+               sysfs_show_32bit_prop(buffer, "io_links_count",
+                               dev->node_props.io_links_count);
+               sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
+                               dev->node_props.cpu_core_id_base);
+               sysfs_show_32bit_prop(buffer, "simd_id_base",
+                               dev->node_props.simd_id_base);
+               sysfs_show_32bit_prop(buffer, "capability",
+                               dev->node_props.capability);
+               sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+                               dev->node_props.max_waves_per_simd);
+               sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+                               dev->node_props.lds_size_in_kb);
+               sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
+                               dev->node_props.gds_size_in_kb);
+               sysfs_show_32bit_prop(buffer, "wave_front_size",
+                               dev->node_props.wave_front_size);
+               sysfs_show_32bit_prop(buffer, "array_count",
+                               dev->node_props.array_count);
+               sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
+                               dev->node_props.simd_arrays_per_engine);
+               sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
+                               dev->node_props.cu_per_simd_array);
+               sysfs_show_32bit_prop(buffer, "simd_per_cu",
+                               dev->node_props.simd_per_cu);
+               sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
+                               dev->node_props.max_slots_scratch_cu);
+               sysfs_show_32bit_prop(buffer, "engine_id",
+                               dev->node_props.engine_id);
+               sysfs_show_32bit_prop(buffer, "vendor_id",
+                               dev->node_props.vendor_id);
+               sysfs_show_32bit_prop(buffer, "device_id",
+                               dev->node_props.device_id);
+               sysfs_show_32bit_prop(buffer, "location_id",
+                               dev->node_props.location_id);
+
+               if (dev->gpu) {
+                       sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
+                                       kfd2kgd->get_max_engine_clock_in_mhz(
+                                               dev->gpu->kgd));
+                       sysfs_show_64bit_prop(buffer, "local_mem_size",
+                                       kfd2kgd->get_vmem_size(dev->gpu->kgd));
+               }
+
+               ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+                               cpufreq_quick_get_max(0)/1000);
+       }
+
+       return ret;
+}
+
+static const struct sysfs_ops node_ops = {
+       .show = node_show,
+};
+
+static struct kobj_type node_type = {
+       .sysfs_ops = &node_ops,
+};
+
+static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
+{
+       sysfs_remove_file(kobj, attr);
+       kobject_del(kobj);
+       kobject_put(kobj);
+}
+
+static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
+{
+       struct kfd_iolink_properties *iolink;
+       struct kfd_cache_properties *cache;
+       struct kfd_mem_properties *mem;
+
+       BUG_ON(!dev);
+
+       if (dev->kobj_iolink) {
+               list_for_each_entry(iolink, &dev->io_link_props, list)
+                       if (iolink->kobj) {
+                               kfd_remove_sysfs_file(iolink->kobj,
+                                                       &iolink->attr);
+                               iolink->kobj = NULL;
+                       }
+               kobject_del(dev->kobj_iolink);
+               kobject_put(dev->kobj_iolink);
+               dev->kobj_iolink = NULL;
+       }
+
+       if (dev->kobj_cache) {
+               list_for_each_entry(cache, &dev->cache_props, list)
+                       if (cache->kobj) {
+                               kfd_remove_sysfs_file(cache->kobj,
+                                                       &cache->attr);
+                               cache->kobj = NULL;
+                       }
+               kobject_del(dev->kobj_cache);
+               kobject_put(dev->kobj_cache);
+               dev->kobj_cache = NULL;
+       }
+
+       if (dev->kobj_mem) {
+               list_for_each_entry(mem, &dev->mem_props, list)
+                       if (mem->kobj) {
+                               kfd_remove_sysfs_file(mem->kobj, &mem->attr);
+                               mem->kobj = NULL;
+                       }
+               kobject_del(dev->kobj_mem);
+               kobject_put(dev->kobj_mem);
+               dev->kobj_mem = NULL;
+       }
+
+       if (dev->kobj_node) {
+               sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
+               sysfs_remove_file(dev->kobj_node, &dev->attr_name);
+               sysfs_remove_file(dev->kobj_node, &dev->attr_props);
+               kobject_del(dev->kobj_node);
+               kobject_put(dev->kobj_node);
+               dev->kobj_node = NULL;
+       }
+}
+
+static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+               uint32_t id)
+{
+       struct kfd_iolink_properties *iolink;
+       struct kfd_cache_properties *cache;
+       struct kfd_mem_properties *mem;
+       int ret;
+       uint32_t i;
+
+       BUG_ON(!dev);
+
+       /*
+        * Creating the sysfs folders
+        */
+       BUG_ON(dev->kobj_node);
+       dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
+       if (!dev->kobj_node)
+               return -ENOMEM;
+
+       ret = kobject_init_and_add(dev->kobj_node, &node_type,
+                       sys_props.kobj_nodes, "%d", id);
+       if (ret < 0)
+               return ret;
+
+       dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
+       if (!dev->kobj_mem)
+               return -ENOMEM;
+
+       dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
+       if (!dev->kobj_cache)
+               return -ENOMEM;
+
+       dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
+       if (!dev->kobj_iolink)
+               return -ENOMEM;
+
+       /*
+        * Creating sysfs files for node properties
+        */
+       dev->attr_gpuid.name = "gpu_id";
+       dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
+       sysfs_attr_init(&dev->attr_gpuid);
+       dev->attr_name.name = "name";
+       dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
+       sysfs_attr_init(&dev->attr_name);
+       dev->attr_props.name = "properties";
+       dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
+       sysfs_attr_init(&dev->attr_props);
+       ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
+       if (ret < 0)
+               return ret;
+       ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
+       if (ret < 0)
+               return ret;
+       ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
+       if (ret < 0)
+               return ret;
+
+       i = 0;
+       list_for_each_entry(mem, &dev->mem_props, list) {
+               mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+               if (!mem->kobj)
+                       return -ENOMEM;
+               ret = kobject_init_and_add(mem->kobj, &mem_type,
+                               dev->kobj_mem, "%d", i);
+               if (ret < 0)
+                       return ret;
+
+               mem->attr.name = "properties";
+               mem->attr.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&mem->attr);
+               ret = sysfs_create_file(mem->kobj, &mem->attr);
+               if (ret < 0)
+                       return ret;
+               i++;
+       }
+
+       i = 0;
+       list_for_each_entry(cache, &dev->cache_props, list) {
+               cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+               if (!cache->kobj)
+                       return -ENOMEM;
+               ret = kobject_init_and_add(cache->kobj, &cache_type,
+                               dev->kobj_cache, "%d", i);
+               if (ret < 0)
+                       return ret;
+
+               cache->attr.name = "properties";
+               cache->attr.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&cache->attr);
+               ret = sysfs_create_file(cache->kobj, &cache->attr);
+               if (ret < 0)
+                       return ret;
+               i++;
+       }
+
+       i = 0;
+       list_for_each_entry(iolink, &dev->io_link_props, list) {
+               iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+               if (!iolink->kobj)
+                       return -ENOMEM;
+               ret = kobject_init_and_add(iolink->kobj, &iolink_type,
+                               dev->kobj_iolink, "%d", i);
+               if (ret < 0)
+                       return ret;
+
+               iolink->attr.name = "properties";
+               iolink->attr.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&iolink->attr);
+               ret = sysfs_create_file(iolink->kobj, &iolink->attr);
+               if (ret < 0)
+                       return ret;
+               i++;
+       }
+
+       return 0;
+}
+
+static int kfd_build_sysfs_node_tree(void)
+{
+       struct kfd_topology_device *dev;
+       int ret;
+       uint32_t i = 0;
+
+       list_for_each_entry(dev, &topology_device_list, list) {
+               ret = kfd_build_sysfs_node_entry(dev, i);
+               if (ret < 0)
+                       return ret;
+               i++;
+       }
+
+       return 0;
+}
+
+static void kfd_remove_sysfs_node_tree(void)
+{
+       struct kfd_topology_device *dev;
+
+       list_for_each_entry(dev, &topology_device_list, list)
+               kfd_remove_sysfs_node_entry(dev);
+}
+
+static int kfd_topology_update_sysfs(void)
+{
+       int ret;
+
+       pr_info("Creating topology SYSFS entries\n");
+       if (sys_props.kobj_topology == NULL) {
+               sys_props.kobj_topology =
+                               kfd_alloc_struct(sys_props.kobj_topology);
+               if (!sys_props.kobj_topology)
+                       return -ENOMEM;
+
+               ret = kobject_init_and_add(sys_props.kobj_topology,
+                               &sysprops_type,  &kfd_device->kobj,
+                               "topology");
+               if (ret < 0)
+                       return ret;
+
+               sys_props.kobj_nodes = kobject_create_and_add("nodes",
+                               sys_props.kobj_topology);
+               if (!sys_props.kobj_nodes)
+                       return -ENOMEM;
+
+               sys_props.attr_genid.name = "generation_id";
+               sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&sys_props.attr_genid);
+               ret = sysfs_create_file(sys_props.kobj_topology,
+                               &sys_props.attr_genid);
+               if (ret < 0)
+                       return ret;
+
+               sys_props.attr_props.name = "system_properties";
+               sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&sys_props.attr_props);
+               ret = sysfs_create_file(sys_props.kobj_topology,
+                               &sys_props.attr_props);
+               if (ret < 0)
+                       return ret;
+       }
+
+       kfd_remove_sysfs_node_tree();
+
+       return kfd_build_sysfs_node_tree();
+}
+
+static void kfd_topology_release_sysfs(void)
+{
+       kfd_remove_sysfs_node_tree();
+       if (sys_props.kobj_topology) {
+               sysfs_remove_file(sys_props.kobj_topology,
+                               &sys_props.attr_genid);
+               sysfs_remove_file(sys_props.kobj_topology,
+                               &sys_props.attr_props);
+               if (sys_props.kobj_nodes) {
+                       kobject_del(sys_props.kobj_nodes);
+                       kobject_put(sys_props.kobj_nodes);
+                       sys_props.kobj_nodes = NULL;
+               }
+               kobject_del(sys_props.kobj_topology);
+               kobject_put(sys_props.kobj_topology);
+               sys_props.kobj_topology = NULL;
+       }
+}
+
+int kfd_topology_init(void)
+{
+       void *crat_image = NULL;
+       size_t image_size = 0;
+       int ret;
+
+       /*
+        * Initialize the head for the topology device list
+        */
+       INIT_LIST_HEAD(&topology_device_list);
+       init_rwsem(&topology_lock);
+       topology_crat_parsed = 0;
+
+       memset(&sys_props, 0, sizeof(sys_props));
+
+       /*
+        * Get the CRAT image from the ACPI
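+        * (the first call only reports the image size; the second fills the buffer)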
+        */
+       ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+       if (ret == 0 && image_size > 0) {
+               pr_info("Found CRAT image with size=%zd\n", image_size);
+               crat_image = kmalloc(image_size, GFP_KERNEL);
+               if (!crat_image) {
+                       ret = -ENOMEM;
+                       pr_err("No memory for allocating CRAT image\n");
+                       goto err;
+               }
+               ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+
+               if (ret == 0) {
+                       down_write(&topology_lock);
+                       ret = kfd_parse_crat_table(crat_image);
+                       if (ret == 0)
+                               ret = kfd_topology_update_sysfs();
+                       up_write(&topology_lock);
+               } else {
+                       pr_err("Couldn't get CRAT table from ACPI\n");
+               }
+               kfree(crat_image);
+       } else if (ret == -ENODATA) {
+               ret = 0;
+       } else {
+               pr_err("Couldn't get CRAT table size from ACPI\n");
+       }
+
+err:
+       pr_info("Finished initializing topology ret=%d\n", ret);
+       return ret;
+}
+
+void kfd_topology_shutdown(void)
+{
+       kfd_topology_release_sysfs();
+       kfd_release_live_view();
+}
+
+static void kfd_debug_print_topology(void)
+{
+       struct kfd_topology_device *dev;
+       uint32_t i = 0;
+
+       pr_info("DEBUG PRINT OF TOPOLOGY:\n");
+       list_for_each_entry(dev, &topology_device_list, list) {
+               pr_info("Node: %d\n", i);
+               pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
+               pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
+               pr_info("\tSIMD count: %d\n", dev->node_props.simd_count);
+               i++;
+       }
+}
+
+static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
+{
+       uint32_t hashout;
+       uint32_t buf[7];
+       int i;
+
+       if (!gpu)
+               return 0;
+
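+       /* Fold the PCI identity and VRAM size into a KFD_GPU_ID_HASH_WIDTH-bit id. */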
+       buf[0] = gpu->pdev->devfn;
+       buf[1] = gpu->pdev->subsystem_vendor;
+       buf[2] = gpu->pdev->subsystem_device;
+       buf[3] = gpu->pdev->device;
+       buf[4] = gpu->pdev->bus->number;
+       buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
+       buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+
+       for (i = 0, hashout = 0; i < 7; i++)
+               hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
+
+       return hashout;
+}
+
+static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+{
+       struct kfd_topology_device *dev;
+       struct kfd_topology_device *out_dev = NULL;
+
+       BUG_ON(!gpu);
+
+       list_for_each_entry(dev, &topology_device_list, list)
+               if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+                       dev->gpu = gpu;
+                       out_dev = dev;
+                       break;
+               }
+
+       return out_dev;
+}
+
+static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
+{
+       /*
+        * TODO: Generate an event for thunk about the arrival/removal
+        * of the GPU
+        */
+}
+
+int kfd_topology_add_device(struct kfd_dev *gpu)
+{
+       uint32_t gpu_id;
+       struct kfd_topology_device *dev;
+       int res;
+
+       BUG_ON(!gpu);
+
+       gpu_id = kfd_generate_gpu_id(gpu);
+
+       pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+       down_write(&topology_lock);
+       /*
+        * Try to assign the GPU to an existing topology device (generated
+        * from the CRAT table).
+        */
+       dev = kfd_assign_gpu(gpu);
+       if (!dev) {
+               pr_info("GPU was not found in the current topology. Extending.\n");
+               kfd_debug_print_topology();
+               dev = kfd_create_topology_device();
+               if (!dev) {
+                       res = -ENOMEM;
+                       goto err;
+               }
+               dev->gpu = gpu;
+
+               /*
+                * TODO: Make a call to retrieve topology information from the
+                * GPU vBIOS
+                */
+
+               /*
+                * Update the SYSFS tree, since we added another topology device
+                */
+               if (kfd_topology_update_sysfs() < 0)
+                       kfd_topology_release_sysfs();
+
+       }
+
+       dev->gpu_id = gpu_id;
+       gpu->id = gpu_id;
+       dev->node_props.vendor_id = gpu->pdev->vendor;
+       dev->node_props.device_id = gpu->pdev->device;
+       dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
+                       (gpu->pdev->devfn & 0xffffff);
+       /*
+        * TODO: Retrieve max engine clock values from KGD
+        */
+
+       res = 0;
+
+err:
+       up_write(&topology_lock);
+
+       if (res == 0)
+               kfd_notify_gpu_change(gpu_id, 1);
+
+       return res;
+}
+
+int kfd_topology_remove_device(struct kfd_dev *gpu)
+{
+       struct kfd_topology_device *dev;
+       uint32_t gpu_id;
+       int res = -ENODEV;
+
+       BUG_ON(!gpu);
+
+       down_write(&topology_lock);
+
+       list_for_each_entry(dev, &topology_device_list, list)
+               if (dev->gpu == gpu) {
+                       gpu_id = dev->gpu_id;
+                       kfd_remove_sysfs_node_entry(dev);
+                       kfd_release_topology_device(dev);
+                       res = 0;
+                       if (kfd_topology_update_sysfs() < 0)
+                               kfd_topology_release_sysfs();
+                       break;
+               }
+
+       up_write(&topology_lock);
+
+       if (res == 0)
+               kfd_notify_gpu_change(gpu_id, 0);
+
+       return res;
+}
+
+/*
+ * When idx is out of bounds, the function will return NULL
+ */
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
+{
+       struct kfd_topology_device *top_dev;
+       struct kfd_dev *device = NULL;
+       uint8_t device_idx = 0;
+
+       down_read(&topology_lock);
+
+       list_for_each_entry(top_dev, &topology_device_list, list) {
+               if (device_idx == idx) {
+                       device = top_dev->gpu;
+                       break;
+               }
+
+               device_idx++;
+       }
+
+       up_read(&topology_lock);
+
+       return device;
+}
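A note on the sysfs show helpers in kfd_topology.c above: sysfs_show_gen_prop() (and the sibling_map loop in kfd_cache_show()) pass `buffer` both as the snprintf() destination and as a "%s" source argument, an overlap the C library does not guarantee to handle. A minimal overlap-free sketch, with a caller-maintained `offs` that is not part of this patch:

	/* Hypothetical offset-based variant of the show macro; each call
	 * appends at buffer + offs instead of re-reading the buffer via "%s". */
	#define sysfs_show_gen_prop(buffer, offs, fmt, ...) \
		((offs) += snprintf((buffer) + (offs), PAGE_SIZE - (offs), \
				fmt, __VA_ARGS__))

	/* usage sketch inside a show function:
	 *	ssize_t offs = 0;
	 *	sysfs_show_gen_prop(buffer, offs, "%s %u\n", "weight", iolink->weight);
	 */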
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
new file mode 100644 (file)
index 0000000..989624b
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __KFD_TOPOLOGY_H__
+#define __KFD_TOPOLOGY_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+
+#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
+
+#define HSA_CAP_HOT_PLUGGABLE                  0x00000001
+#define HSA_CAP_ATS_PRESENT                    0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS           0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2                        0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT               0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT               0x00000020
+#define HSA_CAP_VA_LIMIT                       0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED         0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK    0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT   8
+#define HSA_CAP_RESERVED                       0xfffff000
+
+struct kfd_node_properties {
+       uint32_t cpu_cores_count;
+       uint32_t simd_count;
+       uint32_t mem_banks_count;
+       uint32_t caches_count;
+       uint32_t io_links_count;
+       uint32_t cpu_core_id_base;
+       uint32_t simd_id_base;
+       uint32_t capability;
+       uint32_t max_waves_per_simd;
+       uint32_t lds_size_in_kb;
+       uint32_t gds_size_in_kb;
+       uint32_t wave_front_size;
+       uint32_t array_count;
+       uint32_t simd_arrays_per_engine;
+       uint32_t cu_per_simd_array;
+       uint32_t simd_per_cu;
+       uint32_t max_slots_scratch_cu;
+       uint32_t engine_id;
+       uint32_t vendor_id;
+       uint32_t device_id;
+       uint32_t location_id;
+       uint32_t max_engine_clk_fcompute;
+       uint32_t max_engine_clk_ccompute;
+       uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+};
+
+#define HSA_MEM_HEAP_TYPE_SYSTEM       0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC    1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE   2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS      3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS      4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH  5
+
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE    0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE     0x00000002
+#define HSA_MEM_FLAGS_RESERVED         0xfffffffc
+
+struct kfd_mem_properties {
+       struct list_head        list;
+       uint32_t                heap_type;
+       uint64_t                size_in_bytes;
+       uint32_t                flags;
+       uint32_t                width;
+       uint32_t                mem_clk_max;
+       struct kobject          *kobj;
+       struct attribute        attr;
+};
+
+#define KFD_TOPOLOGY_CPU_SIBLINGS 256
+
+#define HSA_CACHE_TYPE_DATA            0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION     0x00000002
+#define HSA_CACHE_TYPE_CPU             0x00000004
+#define HSA_CACHE_TYPE_HSACU           0x00000008
+#define HSA_CACHE_TYPE_RESERVED                0xfffffff0
+
+struct kfd_cache_properties {
+       struct list_head        list;
+       uint32_t                processor_id_low;
+       uint32_t                cache_level;
+       uint32_t                cache_size;
+       uint32_t                cacheline_size;
+       uint32_t                cachelines_per_tag;
+       uint32_t                cache_assoc;
+       uint32_t                cache_latency;
+       uint32_t                cache_type;
+       uint8_t                 sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
+       struct kobject          *kobj;
+       struct attribute        attr;
+};
+
+struct kfd_iolink_properties {
+       struct list_head        list;
+       uint32_t                iolink_type;
+       uint32_t                ver_maj;
+       uint32_t                ver_min;
+       uint32_t                node_from;
+       uint32_t                node_to;
+       uint32_t                weight;
+       uint32_t                min_latency;
+       uint32_t                max_latency;
+       uint32_t                min_bandwidth;
+       uint32_t                max_bandwidth;
+       uint32_t                rec_transfer_size;
+       uint32_t                flags;
+       struct kobject          *kobj;
+       struct attribute        attr;
+};
+
+struct kfd_topology_device {
+       struct list_head                list;
+       uint32_t                        gpu_id;
+       struct kfd_node_properties      node_props;
+       uint32_t                        mem_bank_count;
+       struct list_head                mem_props;
+       uint32_t                        cache_count;
+       struct list_head                cache_props;
+       uint32_t                        io_link_count;
+       struct list_head                io_link_props;
+       struct kfd_dev                  *gpu;
+       struct kobject                  *kobj_node;
+       struct kobject                  *kobj_mem;
+       struct kobject                  *kobj_cache;
+       struct kobject                  *kobj_iolink;
+       struct attribute                attr_gpuid;
+       struct attribute                attr_name;
+       struct attribute                attr_props;
+};
+
+struct kfd_system_properties {
+       uint32_t                num_devices;     /* Number of H-NUMA nodes */
+       uint32_t                generation_count;
+       uint64_t                platform_oem;
+       uint64_t                platform_id;
+       uint64_t                platform_rev;
+       struct kobject          *kobj_topology;
+       struct kobject          *kobj_nodes;
+       struct attribute        attr_genid;
+       struct attribute        attr_props;
+};
+
+
+
+#endif /* __KFD_TOPOLOGY_H__ */
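The tree built above is plain-text sysfs, so the topology can be inspected without any HSA userspace. A stand-alone reader sketch, assuming the kfd device lands at the usual /sys/devices/virtual/kfd/kfd path and that at least node 0 exists (both assumptions, depending on how kfd_device is registered):

	#include <stdio.h>

	int main(void)
	{
		const char *files[] = {
			"/sys/devices/virtual/kfd/kfd/topology/system_properties",
			"/sys/devices/virtual/kfd/kfd/topology/generation_id",
			"/sys/devices/virtual/kfd/kfd/topology/nodes/0/properties",
		};
		char line[256];

		for (int i = 0; i < 3; i++) {
			FILE *f = fopen(files[i], "r");

			if (!f) {
				perror(files[i]);
				continue;
			}
			while (fgets(line, sizeof(line), f))
				printf("%s", line);	/* "name value" pairs */
			fclose(f);
		}
		return 0;
	}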
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
new file mode 100644 (file)
index 0000000..9c729dd
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef KGD_KFD_INTERFACE_H_INCLUDED
+#define KGD_KFD_INTERFACE_H_INCLUDED
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#define KFD_INTERFACE_VERSION 1
+
+struct kfd_dev;
+struct kgd_dev;
+
+struct kgd_mem;
+
+enum kgd_memory_pool {
+       KGD_POOL_SYSTEM_CACHEABLE = 1,
+       KGD_POOL_SYSTEM_WRITECOMBINE = 2,
+       KGD_POOL_FRAMEBUFFER = 3,
+};
+
+struct kgd2kfd_shared_resources {
+       /* Bit n == 1 means VMID n is available for KFD. */
+       unsigned int compute_vmid_bitmap;
+
+       /* Compute pipes are counted starting from MEC0/pipe0 as 0. */
+       unsigned int first_compute_pipe;
+
+       /* Number of MEC pipes available for KFD. */
+       unsigned int compute_pipe_count;
+
+       /* Base address of doorbell aperture. */
+       phys_addr_t doorbell_physical_address;
+
+       /* Size in bytes of doorbell aperture. */
+       size_t doorbell_aperture_size;
+
+       /* Number of bytes at start of aperture reserved for KGD. */
+       size_t doorbell_start_offset;
+};
+
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
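+ *
+ * @interrupt: Notifies amdkfd about an interrupt on a kgd device, passing
+ * the raw ih ring entry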
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers through which the kgd
+ * driver notifies amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+       void (*exit)(void);
+       struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
+       bool (*device_init)(struct kfd_dev *kfd,
+                       const struct kgd2kfd_shared_resources *gpu_resources);
+       void (*device_exit)(struct kfd_dev *kfd);
+       void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+       void (*suspend)(struct kfd_dev *kfd);
+       int (*resume)(struct kfd_dev *kfd);
+};
+
+/**
+ * struct kfd2kgd_calls
+ *
+ * @init_sa_manager: Initialize an instance of the sa manager, used by
+ * amdkfd for all system memory allocations that are mapped to the GART
+ * address space
+ *
+ * @fini_sa_manager: Releases all memory allocations for amdkfd that are
+ * handled by kgd sa manager
+ *
+ * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
+ * be used for mqds, hpds, kernel queue, fence and runlists
+ *
+ * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
+ *
+ * @get_vmem_size: Retrieves (physical) size of VRAM
+ *
+ * @get_gpu_clock_counter: Retrieves GPU clock counter
+ *
+ * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
+ *
+ * @program_sh_mem_settings: A function that should initialize the memory
+ * properties such as main aperture memory type (cached / non-cached) and
+ * secondary aperture base address, size and memory type.
+ * This function is used only for no cp scheduling mode.
+ *
+ * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
+ * scheduling mode. Only used for no cp scheduling mode.
+ *
+ * @init_memory: Initializes memory apertures to fixed base/limit address
+ * and non-cached memory types.
+ *
+ * @init_pipeline: Initializes the compute pipelines.
+ *
+ * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
+ * scheduling mode.
+ *
+ * @hqd_is_occupies: Checks if a hqd slot is occupied.
+ *
+ * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
+ *
+ * This structure contains function pointers to services that the kgd driver
+ * provides to the amdkfd driver.
+ *
+ */
+struct kfd2kgd_calls {
+       /* Memory management. */
+       int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
+       void (*fini_sa_manager)(struct kgd_dev *kgd);
+       int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
+                       enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+       void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+       uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
+       uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
+
+       uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+
+       /* Register access functions */
+       void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
+                       uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+                       uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+       int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid);
+
+       int (*init_memory)(struct kgd_dev *kgd);
+       int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+       int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr);
+
+       bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id);
+
+       int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
+};
+
+bool kgd2kfd_init(unsigned interface_version,
+                 const struct kfd2kgd_calls *f2g,
+                 const struct kgd2kfd_calls **g2f);
+
+#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
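From the kgd side the handshake is a single call: fill a kfd2kgd_calls table and exchange it for amdkfd's kgd2kfd_calls via kgd2kfd_init(). A sketch against the header above; the my_* helpers are placeholders, not any driver's actual implementations:

	#include "kgd_kfd_interface.h"

	static const struct kgd2kfd_calls *kgd2kfd;	/* returned by amdkfd */

	/* Placeholder callbacks; a real kgd driver wires these to hardware. */
	static uint64_t my_get_vmem_size(struct kgd_dev *kgd) { return 0; }
	static uint32_t my_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) { return 0; }

	static const struct kfd2kgd_calls kfd2kgd = {
		.get_vmem_size = my_get_vmem_size,
		.get_max_engine_clock_in_mhz = my_get_max_engine_clock_in_mhz,
		/* ...remaining callbacks omitted from this sketch... */
	};

	static bool my_bind_to_kfd(void)
	{
		/* false if amdkfd is absent or rejects the interface version */
		return kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd);
	}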
index e4a1490b42c280bbc49fd8cfcdc51a4c84b473fe..e3a7a5078e5ce1d5469fa740cc9bde74551ad090 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_fb.h"
index 9dc0fd5c1ea4ef7741eaad8e85a65ea280096caf..b7ee2634e47cb420cb476c0f2a5906681b826601 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "ast_drv.h"
 
 #include "ast_tables.h"
index fe95d31cd1101118ab8be6174621fbd4692526f9..61dbf09dff5dca3549460039322406eb73fb7bf7 100644 (file)
@@ -9,6 +9,17 @@
 
 /* ---------------------------------------------------------------------- */
 
+static int bochsfb_mmap(struct fb_info *info,
+                       struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct bochs_device *bochs =
+               container_of(fb_helper, struct bochs_device, fb.helper);
+       struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+
+       return ttm_fbdev_mmap(vma, &bo->bo);
+}
+
 static struct fb_ops bochsfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
@@ -19,6 +30,7 @@ static struct fb_ops bochsfb_ops = {
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_mmap = bochsfb_mmap,
 };
 
 static int bochsfb_create_object(struct bochs_device *bochs,
@@ -123,11 +135,11 @@ static int bochsfb_create(struct drm_fb_helper *helper,
        info->screen_base = bo->kmap.virtual;
        info->screen_size = size;
 
-#if 0
-       /* FIXME: get this right for mmap(/dev/fb0) */
-       info->fix.smem_start = bochs_bo_mmap_offset(bo);
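+       /* mmap(/dev/fb0) is served by bochsfb_mmap() via ttm; drop the GEM
+        * mmap offset and don't advertise a linear aperture to userspace. */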
+       drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
+       info->fix.smem_start = 0;
        info->fix.smem_len = size;
-#endif
 
        ret = fb_alloc_cmap(&info->cmap, 256, 0);
        if (ret) {
index dbe619e6aab4e28e2c5d58b8ee87f1bb91b7eb5c..460389702d31cdca34a56b9e20c93f6ad4ad6f05 100644 (file)
@@ -51,11 +51,10 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
 {
        struct bochs_device *bochs = dev->dev_private;
        struct pci_dev *pdev = dev->pdev;
-       unsigned long addr, size, mem, ioaddr, iosize;
+       unsigned long addr, size, mem, ioaddr, iosize, qext_size;
        u16 id;
 
-       if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
-           (pdev->resource[2].flags & IORESOURCE_MEM)) {
+       if (pdev->resource[2].flags & IORESOURCE_MEM) {
                /* mmio bar with vga and bochs registers present */
                if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
                        DRM_ERROR("Cannot request mmio region\n");
@@ -116,6 +115,26 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
                 size / 1024, addr,
                 bochs->ioports ? "ioports" : "mmio",
                 ioaddr);
+
+       if (bochs->mmio && pdev->revision >= 2) {
+               qext_size = readl(bochs->mmio + 0x600);
+               if (qext_size < 4 || qext_size > iosize)
+                       goto noext;
+               DRM_DEBUG("Found qemu ext regs, size %ld\n", qext_size);
+               if (qext_size >= 8) {
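+                       /* Register 0x604 picks the framebuffer byte order:
+                        * 0xbebebebe = big endian, 0x1e1e1e1e = little endian. */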
+#ifdef __BIG_ENDIAN
+                       writel(0xbebebebe, bochs->mmio + 0x604);
+#else
+                       writel(0x1e1e1e1e, bochs->mmio + 0x604);
+#endif
+                       DRM_DEBUG("  qext endian: 0x%x\n",
+                                 readl(bochs->mmio + 0x604));
+               }
+       }
+
+noext:
        return 0;
 }
 
index 6b7efcf363d61df33e493326b54daa8ce6f36825..85f0f8cf1fb82974f14f5491acaca9dae02cfeb0 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include "bochs.h"
+#include <drm/drm_plane_helper.h>
 
 static int defx = 1024;
 static int defy = 768;
@@ -108,11 +109,32 @@ static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 {
 }
 
+static int bochs_crtc_page_flip(struct drm_crtc *crtc,
+                               struct drm_framebuffer *fb,
+                               struct drm_pending_vblank_event *event,
+                               uint32_t page_flip_flags)
+{
+       struct bochs_device *bochs =
+               container_of(crtc, struct bochs_device, crtc);
+       struct drm_framebuffer *old_fb = crtc->primary->fb;
+       unsigned long irqflags;
+
+       crtc->primary->fb = fb;
+       bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
+       if (event) {
+               spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
+               drm_send_vblank_event(bochs->dev, -1, event);
+               spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
+       }
+       return 0;
+}
+
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs bochs_crtc_funcs = {
        .gamma_set = bochs_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = drm_crtc_cleanup,
+       .page_flip = bochs_crtc_page_flip,
 };
 
 static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
index d44e69daa23966c0e2f38332069a81cc24070d0a..693a4565c4ffb2a0629d48ec206ba3bb4accf830 100644 (file)
@@ -210,6 +210,9 @@ int cirrus_framebuffer_init(struct drm_device *dev,
                            struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj);
 
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+                             int bpp, int pitch);
+
                                /* cirrus_display.c */
 int cirrus_modeset_init(struct cirrus_device *cdev);
 void cirrus_modeset_fini(struct cirrus_device *cdev);
index d231b1c317afac94476e572b9560566311a38168..502a89eb54b51a7f146e9e61e54f03a0051c9b2d 100644 (file)
@@ -139,6 +139,7 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
                               struct drm_gem_object **gobj_p)
 {
        struct drm_device *dev = afbdev->helper.dev;
+       struct cirrus_device *cdev = dev->dev_private;
        u32 bpp, depth;
        u32 size;
        struct drm_gem_object *gobj;
@@ -146,8 +147,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
        int ret = 0;
        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
-       if (bpp > 24)
+       if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+                                     bpp, mode_cmd->pitches[0]))
                return -EINVAL;
+
        size = mode_cmd->pitches[0] * mode_cmd->height;
        ret = cirrus_gem_create(dev, size, true, &gobj);
        if (ret)
index 99c1983f99d228b77ce9c498236ad68b348440e2..4c2d68e9102d6304b8fd5cc655266300706f32da 100644 (file)
@@ -49,14 +49,16 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
                               struct drm_file *filp,
                               struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       struct cirrus_device *cdev = dev->dev_private;
        struct drm_gem_object *obj;
        struct cirrus_framebuffer *cirrus_fb;
        int ret;
        u32 bpp, depth;
 
        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
-       /* cirrus can't handle > 24bpp framebuffers at all */
-       if (bpp > 24)
+
+       if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+                                     bpp, mode_cmd->pitches[0]))
                return ERR_PTR(-EINVAL);
 
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
@@ -96,8 +98,7 @@ static int cirrus_vram_init(struct cirrus_device *cdev)
 {
        /* BAR 0 is VRAM */
        cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
-       /* We have 4MB of VRAM */
-       cdev->mc.vram_size = 4 * 1024 * 1024;
+       cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
 
        if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
                                "cirrusdrmfb_vram")) {
@@ -179,17 +180,22 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
        }
 
        r = cirrus_mm_init(cdev);
-       if (r)
+       if (r) {
                dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+               goto out;
+       }
 
        r = cirrus_modeset_init(cdev);
-       if (r)
+       if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+               goto out;
+       }
 
        dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+
+       return 0;
 out:
-       if (r)
-               cirrus_driver_unload(dev);
+       cirrus_driver_unload(dev);
        return r;
 }
 
@@ -307,3 +313,21 @@ out_unlock:
        return ret;
 
 }
+
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+                             int bpp, int pitch)
+{
+       const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
+       const int max_size = cdev->mc.vram_size;
+
+       if (bpp > 32)
+               return false;
+
+       if (pitch > max_pitch)
+               return false;
+
+       if (pitch * height > max_size)
+               return false;
+
+       return true;
+}
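For scale: max_pitch here is 0x1FF << 3 = 4088 bytes, so with packed pixels a 1024-pixel-wide framebuffer passes at 16 bpp (2048-byte pitch) and 24 bpp (3072) but fails at 32 bpp (4096), and pitch * height is now bounded by whatever BAR 0 reports instead of a hard-coded 4 MB. A stand-alone restatement, for illustration only (real pitches come from mode_cmd->pitches[0] and may be padded):

	#include <stdbool.h>
	#include <stdio.h>

	static bool fits(long width, long height, int bpp, long vram_size)
	{
		const long max_pitch = 0x1FF << 3;	/* 4088 bytes */
		long pitch = width * (bpp / 8);		/* packed-pixel assumption */

		return bpp <= 32 && pitch <= max_pitch &&
		       pitch * height <= vram_size;
	}

	int main(void)
	{
		printf("1024x768@32bpp: %d\n", fits(1024, 768, 32, 4L << 20)); /* 0 */
		printf("1024x768@16bpp: %d\n", fits(1024, 768, 16, 4L << 20)); /* 1 */
		return 0;
	}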
index c7c5a9d91fa0082b2967a55fb8614bcdb6f74a1d..99d4a74ffeaffd2582ca78353ae2156a90c796f5 100644 (file)
@@ -16,6 +16,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include <video/cirrus.h>
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
new file mode 100644 (file)
index 0000000..ff5f034
--- /dev/null
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+
+static void kfree_state(struct drm_atomic_state *state)
+{
+       kfree(state->connectors);
+       kfree(state->connector_states);
+       kfree(state->crtcs);
+       kfree(state->crtc_states);
+       kfree(state->planes);
+       kfree(state->plane_states);
+       kfree(state);
+}
+
+/**
+ * drm_atomic_state_alloc - allocate atomic state
+ * @dev: DRM device
+ *
+ * This allocates an empty atomic state to track updates.
+ */
+struct drm_atomic_state *
+drm_atomic_state_alloc(struct drm_device *dev)
+{
+       struct drm_atomic_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
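+       /* Snapshot the connector count once; connectors can be hotplugged. */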
+       state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
+
+       state->crtcs = kcalloc(dev->mode_config.num_crtc,
+                              sizeof(*state->crtcs), GFP_KERNEL);
+       if (!state->crtcs)
+               goto fail;
+       state->crtc_states = kcalloc(dev->mode_config.num_crtc,
+                                    sizeof(*state->crtc_states), GFP_KERNEL);
+       if (!state->crtc_states)
+               goto fail;
+       state->planes = kcalloc(dev->mode_config.num_total_plane,
+                               sizeof(*state->planes), GFP_KERNEL);
+       if (!state->planes)
+               goto fail;
+       state->plane_states = kcalloc(dev->mode_config.num_total_plane,
+                                     sizeof(*state->plane_states), GFP_KERNEL);
+       if (!state->plane_states)
+               goto fail;
+       state->connectors = kcalloc(state->num_connector,
+                                   sizeof(*state->connectors),
+                                   GFP_KERNEL);
+       if (!state->connectors)
+               goto fail;
+       state->connector_states = kcalloc(state->num_connector,
+                                         sizeof(*state->connector_states),
+                                         GFP_KERNEL);
+       if (!state->connector_states)
+               goto fail;
+
+       state->dev = dev;
+
+       DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+
+       return state;
+fail:
+       kfree_state(state);
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_state_alloc);
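A minimal usage sketch, assuming the caller owns a struct drm_modeset_acquire_ctx (the local ctx below): the state object is allocated and bound to an acquire context before any per-object state is pulled in.

	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;
	/* all drm_atomic_get_*_state() calls will lock through this context */
	state->acquire_ctx = &ctx;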
+
+/**
+ * drm_atomic_state_clear - clear state object
+ * @state: atomic state
+ *
+ * When the w/w mutex algorithm detects a deadlock we need to back off and drop
+ * all locks. So someone else could sneak in and change the current modeset
+ * configuration. Which means that all the state assembled in @state is no
+ * longer an atomic update to the current state, but to some arbitrary earlier
+ * state. Which could break assumptions the driver's ->atomic_check likely
+ * relies on.
+ *
+ * Hence we must clear all cached state and completely start over, using this
+ * function.
+ */
+void drm_atomic_state_clear(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+       int i;
+
+       DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+
+       for (i = 0; i < state->num_connector; i++) {
+               struct drm_connector *connector = state->connectors[i];
+
+               if (!connector)
+                       continue;
+
+               WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+               connector->funcs->atomic_destroy_state(connector,
+                                                      state->connector_states[i]);
+       }
+
+       for (i = 0; i < config->num_crtc; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               crtc->funcs->atomic_destroy_state(crtc,
+                                                 state->crtc_states[i]);
+       }
+
+       for (i = 0; i < config->num_total_plane; i++) {
+               struct drm_plane *plane = state->planes[i];
+
+               if (!plane)
+                       continue;
+
+               plane->funcs->atomic_destroy_state(plane,
+                                                  state->plane_states[i]);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_state_clear);
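A sketch of the backoff dance this enables, reusing the state/ctx pair from the previous snippet: whenever a state lookup reports -EDEADLK, clear the state, back off, and rebuild from the start.

	retry:
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			if (PTR_ERR(crtc_state) == -EDEADLK) {
				/* drop everything and take the contended lock */
				drm_atomic_state_clear(state);
				drm_modeset_backoff(state->acquire_ctx);
				goto retry;
			}
			return PTR_ERR(crtc_state);
		}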
+
+/**
+ * drm_atomic_state_free - free all memory for an atomic state
+ * @state: atomic state to deallocate
+ *
+ * This frees all memory associated with an atomic state, including all the
+ * per-object state for planes, crtcs and connectors.
+ */
+void drm_atomic_state_free(struct drm_atomic_state *state)
+{
+       drm_atomic_state_clear(state);
+
+       DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+
+       kfree_state(state);
+}
+EXPORT_SYMBOL(drm_atomic_state_free);
+
+/**
+ * drm_atomic_get_crtc_state - get crtc state
+ * @state: global atomic state object
+ * @crtc: crtc to get state object for
+ *
+ * This function returns the crtc state for the given crtc, allocating it if
+ * needed. It will also grab the relevant crtc lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_crtc_state *
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+                         struct drm_crtc *crtc)
+{
+       int ret, index;
+       struct drm_crtc_state *crtc_state;
+
+       index = drm_crtc_index(crtc);
+
+       if (state->crtc_states[index])
+               return state->crtc_states[index];
+
+       ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+       if (ret)
+               return ERR_PTR(ret);
+
+       crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+       if (!crtc_state)
+               return ERR_PTR(-ENOMEM);
+
+       state->crtc_states[index] = crtc_state;
+       state->crtcs[index] = crtc;
+       crtc_state->state = state;
+
+       DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
+                     crtc->base.id, crtc_state, state);
+
+       return crtc_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+
+/**
+ * drm_atomic_get_plane_state - get plane state
+ * @state: global atomic state object
+ * @plane: plane to get state object for
+ *
+ * This function returns the plane state for the given plane, allocating it if
+ * needed. It will also grab the relevant plane lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_plane_state *
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+                         struct drm_plane *plane)
+{
+       int ret, index;
+       struct drm_plane_state *plane_state;
+
+       index = drm_plane_index(plane);
+
+       if (state->plane_states[index])
+               return state->plane_states[index];
+
+       ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+       if (ret)
+               return ERR_PTR(ret);
+
+       plane_state = plane->funcs->atomic_duplicate_state(plane);
+       if (!plane_state)
+               return ERR_PTR(-ENOMEM);
+
+       state->plane_states[index] = plane_state;
+       state->planes[index] = plane;
+       plane_state->state = state;
+
+       DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
+                     plane->base.id, plane_state, state);
+
+       if (plane_state->crtc) {
+               struct drm_crtc_state *crtc_state;
+
+               crtc_state = drm_atomic_get_crtc_state(state,
+                                                      plane_state->crtc);
+               if (IS_ERR(crtc_state))
+                       return ERR_CAST(crtc_state);
+       }
+
+       return plane_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_plane_state);
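Once obtained, the duplicated state is mutated directly. For example, to place the plane at the crtc origin (per struct drm_plane_state the src_* coordinates are 16.16 fixed point, the crtc_* coordinates are in pixels):

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->src_x = 0 << 16;
	plane_state->src_y = 0 << 16;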
+
+/**
+ * drm_atomic_get_connector_state - get connector state
+ * @state: global atomic state object
+ * @connector: connector to get state object for
+ *
+ * This function returns the connector state for the given connector,
+ * allocating it if needed. It will also grab the relevant connector lock to
+ * make sure that the state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_connector_state *
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+                         struct drm_connector *connector)
+{
+       int ret, index;
+       struct drm_mode_config *config = &connector->dev->mode_config;
+       struct drm_connector_state *connector_state;
+
+       ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+       if (ret)
+               return ERR_PTR(ret);
+
+       index = drm_connector_index(connector);
+
+       /*
+        * Construction of atomic state updates can race with a connector
+        * hot-add which might overflow. In this case flip the table and just
+        * restart the entire ioctl - no one is fast enough to livelock a cpu
+        * with physical hotplug events anyway.
+        *
+        * Note that we only grab the indexes once we have the right lock to
+        * prevent hotplug/unplugging of connectors. So removal is no problem,
+        * at most the array is a bit too large.
+        */
+       if (index >= state->num_connector) {
+               DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+               return ERR_PTR(-EAGAIN);
+       }
+
+       if (state->connector_states[index])
+               return state->connector_states[index];
+
+       connector_state = connector->funcs->atomic_duplicate_state(connector);
+       if (!connector_state)
+               return ERR_PTR(-ENOMEM);
+
+       state->connector_states[index] = connector_state;
+       state->connectors[index] = connector;
+       connector_state->state = state;
+
+       DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
+                     connector->base.id, connector_state, state);
+
+       if (connector_state->crtc) {
+               struct drm_crtc_state *crtc_state;
+
+               crtc_state = drm_atomic_get_crtc_state(state,
+                                                      connector_state->crtc);
+               if (IS_ERR(crtc_state))
+                       return ERR_CAST(crtc_state);
+       }
+
+       return connector_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_state);
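Note that -EAGAIN here is different from -EDEADLK: the whole state object must be thrown away and the ioctl restarted from a fresh allocation. Roughly, with the restart label standing in for the caller's allocation path:

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		if (ret == -EAGAIN) {
			/* connector hot-add raced us, rebuild from scratch */
			drm_atomic_state_free(state);
			drm_modeset_drop_locks(&ctx);
			drm_modeset_acquire_fini(&ctx);
			goto restart;
		}
		return ret;
	}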
+
+/**
+ * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * @state: the incoming atomic state
+ * @plane: the plane whose incoming state to update
+ * @crtc: crtc to use for the plane
+ *
+ * Changing the assigned crtc for a plane requires us to grab the lock and state
+ * for the new crtc, as needed. This function takes care of all these details
+ * besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+                             struct drm_plane *plane, struct drm_crtc *crtc)
+{
+       struct drm_plane_state *plane_state =
+                       drm_atomic_get_plane_state(state, plane);
+       struct drm_crtc_state *crtc_state;
+
+       if (WARN_ON(IS_ERR(plane_state)))
+               return PTR_ERR(plane_state);
+
+       if (plane_state->crtc) {
+               crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+                                                      plane_state->crtc);
+               if (WARN_ON(IS_ERR(crtc_state)))
+                       return PTR_ERR(crtc_state);
+
+               crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+       }
+
+       plane_state->crtc = crtc;
+
+       if (crtc) {
+               crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+                                                      crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+               crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+       }
+
+       if (crtc)
+               DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
+                             plane_state, crtc->base.id);
+       else
+               DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
+
+/**
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
+ * @plane_state: atomic state object for the plane
+ * @fb: fb to use for the plane
+ *
+ * Changing the assigned framebuffer for a plane requires us to grab a reference
+ * to the new fb and drop the reference to the old fb, if there is one. This
+ * function takes care of all these details besides updating the pointer in the
+ * state object itself.
+ */
+void
+drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+                           struct drm_framebuffer *fb)
+{
+       if (plane_state->fb)
+               drm_framebuffer_unreference(plane_state->fb);
+       if (fb)
+               drm_framebuffer_reference(fb);
+       plane_state->fb = fb;
+
+       if (fb)
+               DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
+                             fb->base.id, plane_state);
+       else
+               DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
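Together with drm_atomic_set_crtc_for_plane() above this stages a complete plane update; a sketch which scans out fb full-size at the crtc origin, continuing from the plane_state obtained earlier:

	ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;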
+
+/**
+ * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * @conn_state: atomic state object for the connector
+ * @crtc: crtc to use for the connector
+ *
+ * Changing the assigned crtc for a connector requires us to grab the lock and
+ * state for the new crtc, as needed. This function takes care of all these
+ * details besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+                                 struct drm_crtc *crtc)
+{
+       struct drm_crtc_state *crtc_state;
+
+       if (crtc) {
+               crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+       }
+
+       conn_state->crtc = crtc;
+
+       if (crtc)
+               DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
+                             conn_state, crtc->base.id);
+       else
+               DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
+                             conn_state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
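Routing a connector follows the same shape, given a conn_state obtained with drm_atomic_get_connector_state():

	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
	if (ret)
		return ret;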
+
+/**
+ * drm_atomic_add_affected_connectors - add connectors for crtc
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function walks the current configuration and adds all connectors
+ * currently using @crtc to the atomic configuration @state. Note that this
+ * function must acquire the connection mutex. This can potentially cause
+ * unneeded serialization if the update is just for the planes on one crtc. Hence
+ * drivers and helpers should only call this when really needed (e.g. when a
+ * full modeset needs to happen due to some change).
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+                                  struct drm_crtc *crtc)
+{
+       struct drm_mode_config *config = &state->dev->mode_config;
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
+       int ret;
+
+       ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
+                     crtc->base.id, state);
+
+       /*
+        * Changed connectors are already in @state, so only need to look at the
+        * current configuration.
+        */
+       list_for_each_entry(connector, &config->connector_list, head) {
+               if (connector->state->crtc != crtc)
+                       continue;
+
+               conn_state = drm_atomic_get_connector_state(state, connector);
+               if (IS_ERR(conn_state))
+                       return PTR_ERR(conn_state);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
+
+/**
+ * drm_atomic_connectors_for_crtc - count number of connected outputs
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function counts all connectors which will be connected to @crtc
+ * according to @state. Useful to recompute the enable state for @crtc.
+ */
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+                              struct drm_crtc *crtc)
+{
+       int i, num_connected_connectors = 0;
+
+       for (i = 0; i < state->num_connector; i++) {
+               struct drm_connector_state *conn_state;
+
+               conn_state = state->connector_states[i];
+
+               if (conn_state && conn_state->crtc == crtc)
+                       num_connected_connectors++;
+       }
+
+       DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
+                     state, num_connected_connectors, crtc->base.id);
+
+       return num_connected_connectors;
+}
+EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
+
+/**
+ * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
+ * @state: atomic state
+ *
+ * This function should be used by legacy entry points which don't understand
+ * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
+ * the slowpath has completed.
+ */
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
+{
+       int ret;
+
+retry:
+       drm_modeset_backoff(state->acquire_ctx);
+
+       ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+                              state->acquire_ctx);
+       if (ret)
+               goto retry;
+       ret = drm_modeset_lock_all_crtcs(state->dev,
+                                        state->acquire_ctx);
+       if (ret)
+               goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_legacy_backoff);
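A legacy entry point built on the atomic interfaces would use it roughly like so, with retry wrapping the whole state construction:

	retry:
		/* ...assemble the update into state... */
		ret = drm_atomic_commit(state);
		if (ret == -EDEADLK) {
			drm_atomic_state_clear(state);
			drm_atomic_legacy_backoff(state);
			goto retry;
		}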
+
+/**
+ * drm_atomic_check_only - check whether a given config would work
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_check_only(struct drm_atomic_state *state)
+{
+       struct drm_mode_config *config = &state->dev->mode_config;
+
+       DRM_DEBUG_KMS("checking %p\n", state);
+
+       if (config->funcs->atomic_check)
+               return config->funcs->atomic_check(state->dev, state);
+       else
+               return 0;
+}
+EXPORT_SYMBOL(drm_atomic_check_only);
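This split allows a configuration to be validated without being committed; a sketch, still inside the locked section:

	ret = drm_atomic_check_only(state);
	/* the configuration was only probed, discard the state again */
	drm_atomic_state_free(state);
	return ret;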
+
+/**
+ * drm_atomic_commit - commit configuration atomically
+ * @state: atomic configuration to commit
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_commit(struct drm_atomic_state *state)
+{
+       struct drm_mode_config *config = &state->dev->mode_config;
+       int ret;
+
+       ret = drm_atomic_check_only(state);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_KMS("commiting %p\n", state);
+
+       return config->funcs->atomic_commit(state->dev, state, false);
+}
+EXPORT_SYMBOL(drm_atomic_commit);
+
+/**
+ * drm_atomic_async_commit - atomic&async configuration commit
+ * @state: atomic configuration to commit
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_async_commit(struct drm_atomic_state *state)
+{
+       struct drm_mode_config *config = &state->dev->mode_config;
+       int ret;
+
+       ret = drm_atomic_check_only(state);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
+
+       return config->funcs->atomic_commit(state->dev, state, true);
+}
+EXPORT_SYMBOL(drm_atomic_async_commit);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
new file mode 100644 (file)
index 0000000..4a78a77
--- /dev/null
@@ -0,0 +1,1966 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/fence.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides implementations of check and commit functions on
+ * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
+ * also provides convenience implementations for the atomic state handling
+ * callbacks for drivers which don't need to subclass the drm core structures to
+ * add their own additional internal state.
+ *
+ * This library also provides default implementations for the check callback in
+ * drm_atomic_helper_check and for the commit callback with
+ * drm_atomic_helper_commit. But the individual stages and callbacks are exposed
+ * to allow drivers to mix and match and e.g. use the plane helpers only
+ * together with a driver private modeset implementation.
+ *
+ * This library also provides implementations for all the legacy driver
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
+ * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the
+ * various functions to implement set_property callbacks. New drivers must not
+ * implement these functions themselves but must use the provided helpers.
+ */
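A driver opting into these helpers wires them up through its mode_config funcs, roughly as below (the foo_* names are hypothetical placeholders):

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		.fb_create     = foo_fb_create,	/* driver specific */
		.atomic_check  = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};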
+static void
+drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
+                               struct drm_plane_state *plane_state,
+                               struct drm_plane *plane)
+{
+       struct drm_crtc_state *crtc_state;
+
+       if (plane->state->crtc) {
+               crtc_state =
+                       state->crtc_states[drm_crtc_index(plane->state->crtc)];
+
+               if (WARN_ON(!crtc_state))
+                       return;
+
+               crtc_state->planes_changed = true;
+       }
+
+       if (plane_state->crtc) {
+               crtc_state =
+                       state->crtc_states[drm_crtc_index(plane_state->crtc)];
+
+               if (WARN_ON(!crtc_state))
+                       return;
+
+               crtc_state->planes_changed = true;
+       }
+}
+
+static struct drm_crtc *
+get_current_crtc_for_encoder(struct drm_device *dev,
+                            struct drm_encoder *encoder)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+       list_for_each_entry(connector, &config->connector_list, head) {
+               if (connector->state->best_encoder != encoder)
+                       continue;
+
+               return connector->state->crtc;
+       }
+
+       return NULL;
+}
+
+static int
+steal_encoder(struct drm_atomic_state *state,
+             struct drm_encoder *encoder,
+             struct drm_crtc *encoder_crtc)
+{
+       struct drm_mode_config *config = &state->dev->mode_config;
+       struct drm_crtc_state *crtc_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int ret;
+
+       /*
+        * We can only steal an encoder coming from a connector, which means we
+        * must already hold the connection_mutex.
+        */
+       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+       DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+                     encoder->base.id, encoder->name,
+                     encoder_crtc->base.id);
+
+       crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
+
+       crtc_state->mode_changed = true;
+
+       list_for_each_entry(connector, &config->connector_list, head) {
+               if (connector->state->best_encoder != encoder)
+                       continue;
+
+               DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
+                             connector->base.id,
+                             connector->name);
+
+               connector_state = drm_atomic_get_connector_state(state,
+                                                                connector);
+               if (IS_ERR(connector_state))
+                       return PTR_ERR(connector_state);
+
+               ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+               if (ret)
+                       return ret;
+               connector_state->best_encoder = NULL;
+       }
+
+       return 0;
+}
+
+static int
+update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+{
+       struct drm_connector_helper_funcs *funcs;
+       struct drm_encoder *new_encoder;
+       struct drm_crtc *encoder_crtc;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       struct drm_crtc_state *crtc_state;
+       int idx, ret;
+
+       connector = state->connectors[conn_idx];
+       connector_state = state->connector_states[conn_idx];
+
+       if (!connector)
+               return 0;
+
+       DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
+                       connector->base.id,
+                       connector->name);
+
+       if (connector->state->crtc != connector_state->crtc) {
+               if (connector->state->crtc) {
+                       idx = drm_crtc_index(connector->state->crtc);
+
+                       crtc_state = state->crtc_states[idx];
+                       crtc_state->mode_changed = true;
+               }
+
+               if (connector_state->crtc) {
+                       idx = drm_crtc_index(connector_state->crtc);
+
+                       crtc_state = state->crtc_states[idx];
+                       crtc_state->mode_changed = true;
+               }
+       }
+
+       if (!connector_state->crtc) {
+               DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+                               connector->base.id,
+                               connector->name);
+
+               connector_state->best_encoder = NULL;
+
+               return 0;
+       }
+
+       funcs = connector->helper_private;
+       new_encoder = funcs->best_encoder(connector);
+
+       if (!new_encoder) {
+               DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+                             connector->base.id,
+                             connector->name);
+               return -EINVAL;
+       }
+
+       if (new_encoder == connector_state->best_encoder) {
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+                             connector->base.id,
+                             connector->name,
+                             new_encoder->base.id,
+                             new_encoder->name,
+                             connector_state->crtc->base.id);
+
+               return 0;
+       }
+
+       encoder_crtc = get_current_crtc_for_encoder(state->dev,
+                                                   new_encoder);
+
+       if (encoder_crtc) {
+               ret = steal_encoder(state, new_encoder, encoder_crtc);
+               if (ret) {
+                       DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+                                     connector->base.id,
+                                     connector->name);
+                       return ret;
+               }
+       }
+
+       connector_state->best_encoder = new_encoder;
+       idx = drm_crtc_index(connector_state->crtc);
+
+       crtc_state = state->crtc_states[idx];
+       crtc_state->mode_changed = true;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+                     connector->base.id,
+                     connector->name,
+                     new_encoder->base.id,
+                     new_encoder->name,
+                     connector_state->crtc->base.id);
+
+       return 0;
+}
+
+static int
+mode_fixup(struct drm_atomic_state *state)
+{
+       int ncrtcs = state->dev->mode_config.num_crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_connector_state *conn_state;
+       int i;
+       bool ret;
+
+       for (i = 0; i < ncrtcs; i++) {
+               crtc_state = state->crtc_states[i];
+
+               if (!crtc_state || !crtc_state->mode_changed)
+                       continue;
+
+               drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
+       }
+
+       for (i = 0; i < state->num_connector; i++) {
+               struct drm_encoder_helper_funcs *funcs;
+               struct drm_encoder *encoder;
+
+               conn_state = state->connector_states[i];
+
+               if (!conn_state)
+                       continue;
+
+               WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
+
+               if (!conn_state->crtc || !conn_state->best_encoder)
+                       continue;
+
+               crtc_state =
+                       state->crtc_states[drm_crtc_index(conn_state->crtc)];
+
+               /*
+                * Each encoder has at most one connector (since we always steal
+                * it away), so we won't call ->mode_fixup twice.
+                */
+               encoder = conn_state->best_encoder;
+               funcs = encoder->helper_private;
+
+               if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+                       ret = encoder->bridge->funcs->mode_fixup(
+                                       encoder->bridge, &crtc_state->mode,
+                                       &crtc_state->adjusted_mode);
+                       if (!ret) {
+                               DRM_DEBUG_KMS("Bridge fixup failed\n");
+                               return -EINVAL;
+                       }
+               }
+
+
+               ret = funcs->mode_fixup(encoder, &crtc_state->mode,
+                                       &crtc_state->adjusted_mode);
+               if (!ret) {
+                       DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
+                                     encoder->base.id, encoder->name);
+                       return -EINVAL;
+               }
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc;
+
+               crtc_state = state->crtc_states[i];
+               crtc = state->crtcs[i];
+
+               if (!crtc_state || !crtc_state->mode_changed)
+                       continue;
+
+               funcs = crtc->helper_private;
+               ret = funcs->mode_fixup(crtc, &crtc_state->mode,
+                                       &crtc_state->adjusted_mode);
+               if (!ret) {
+                       DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
+                                     crtc->base.id);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int
+drm_atomic_helper_check_modeset(struct drm_device *dev,
+                               struct drm_atomic_state *state)
+{
+       int ncrtcs = dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int i, ret;
+
+       for (i = 0; i < ncrtcs; i++) {
+               crtc = state->crtcs[i];
+               crtc_state = state->crtc_states[i];
+
+               if (!crtc)
+                       continue;
+
+               if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
+                       DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
+                                     crtc->base.id);
+                       crtc_state->mode_changed = true;
+               }
+
+               if (crtc->state->enable != crtc_state->enable) {
+                       DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
+                                     crtc->base.id);
+                       crtc_state->mode_changed = true;
+               }
+       }
+
+       for (i = 0; i < state->num_connector; i++) {
+               /*
+                * This only sets crtc->mode_changed for routing changes,
+                * drivers must set crtc->mode_changed themselves when connector
+                * properties need to be updated.
+                */
+               ret = update_connector_routing(state, i);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * After all the routing has been prepared we need to add in any
+        * connector which is itself unchanged, but whose crtc changes its
+        * configuration. This must be done before calling mode_fixup in case a
+        * crtc only changed its mode but has the same set of connectors.
+        */
+       for (i = 0; i < ncrtcs; i++) {
+               int num_connectors;
+
+               crtc = state->crtcs[i];
+               crtc_state = state->crtc_states[i];
+
+               if (!crtc || !crtc_state->mode_changed)
+                       continue;
+
+               DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
+                             crtc->base.id,
+                             crtc_state->enable ? 'y' : 'n');
+
+               ret = drm_atomic_add_affected_connectors(state, crtc);
+               if (ret != 0)
+                       return ret;
+
+               num_connectors = drm_atomic_connectors_for_crtc(state,
+                                                               crtc);
+
+               if (crtc_state->enable != !!num_connectors) {
+                       DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
+                                     crtc->base.id);
+
+                       return -EINVAL;
+               }
+       }
+
+       return mode_fixup(state);
+}
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their ->atomic_check()
+ * callback.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+                           struct drm_atomic_state *state)
+{
+       int nplanes = dev->mode_config.num_total_plane;
+       int ncrtcs = dev->mode_config.num_crtc;
+       int i, ret = 0;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane_helper_funcs *funcs;
+               struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *plane_state = state->plane_states[i];
+
+               if (!plane)
+                       continue;
+
+               funcs = plane->helper_private;
+
+               drm_atomic_helper_plane_changed(state, plane_state, plane);
+
+               if (!funcs || !funcs->atomic_check)
+                       continue;
+
+               ret = funcs->atomic_check(plane, plane_state);
+               if (ret) {
+                       DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
+                                     plane->base.id);
+                       return ret;
+               }
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc = state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               if (!funcs || !funcs->atomic_check)
+                       continue;
+
+               ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+               if (ret) {
+                       DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
+                                     crtc->base.id);
+                       return ret;
+               }
+       }
+
+       return drm_atomic_helper_check_modeset(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_check);
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       int i;
+
+       for (i = 0; i < old_state->num_connector; i++) {
+               struct drm_connector_state *old_conn_state;
+               struct drm_connector *connector;
+               struct drm_encoder_helper_funcs *funcs;
+               struct drm_encoder *encoder;
+
+               old_conn_state = old_state->connector_states[i];
+               connector = old_state->connectors[i];
+
+               /* Shut down everything that's in the changeset and currently
+                * still on, so we need to check the old, saved state. */
+               if (!old_conn_state || !old_conn_state->crtc)
+                       continue;
+
+               encoder = old_conn_state->best_encoder;
+
+               /* We shouldn't get this far if we didn't previously have
+                * an encoder.. but WARN_ON() rather than explode.
+                */
+               if (WARN_ON(!encoder))
+                       continue;
+
+               funcs = encoder->helper_private;
+
+               /*
+                * Each encoder has at most one connector (since we always steal
+                * it away), so we won't call disable hooks twice.
+                */
+               if (encoder->bridge)
+                       encoder->bridge->funcs->disable(encoder->bridge);
+
+               /* Right function depends upon target state. */
+               if (connector->state->crtc)
+                       funcs->prepare(encoder);
+               else if (funcs->disable)
+                       funcs->disable(encoder);
+               else
+                       funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+               if (encoder->bridge)
+                       encoder->bridge->funcs->post_disable(encoder->bridge);
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc;
+
+               crtc = old_state->crtcs[i];
+
+               /* Shut down everything that needs a full modeset. */
+               if (!crtc || !crtc->state->mode_changed)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               /* Right function depends upon target state. */
+               if (crtc->state->enable)
+                       funcs->prepare(crtc);
+               else if (funcs->disable)
+                       funcs->disable(crtc);
+               else
+                       funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+       }
+}
+
+static void
+set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       int i;
+
+       /* clear out existing links */
+       for (i = 0; i < old_state->num_connector; i++) {
+               struct drm_connector *connector;
+
+               connector = old_state->connectors[i];
+
+               if (!connector || !connector->encoder)
+                       continue;
+
+               WARN_ON(!connector->encoder->crtc);
+
+               connector->encoder->crtc = NULL;
+               connector->encoder = NULL;
+       }
+
+       /* set new links */
+       for (i = 0; i < old_state->num_connector; i++) {
+               struct drm_connector *connector;
+
+               connector = old_state->connectors[i];
+
+               if (!connector || !connector->state->crtc)
+                       continue;
+
+               if (WARN_ON(!connector->state->best_encoder))
+                       continue;
+
+               connector->encoder = connector->state->best_encoder;
+               connector->encoder->crtc = connector->state->crtc;
+       }
+
+       /* set legacy state in the crtc structure */
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc *crtc;
+
+               crtc = old_state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               crtc->mode = crtc->state->mode;
+               crtc->enabled = crtc->state->enable;
+               crtc->x = crtc->primary->state->src_x >> 16;
+               crtc->y = crtc->primary->state->src_y >> 16;
+       }
+}
+
+static void
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       int i;
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc;
+
+               crtc = old_state->crtcs[i];
+
+               if (!crtc || !crtc->state->mode_changed)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               if (crtc->state->enable)
+                       funcs->mode_set_nofb(crtc);
+       }
+
+       for (i = 0; i < old_state->num_connector; i++) {
+               struct drm_connector *connector;
+               struct drm_crtc_state *new_crtc_state;
+               struct drm_encoder_helper_funcs *funcs;
+               struct drm_encoder *encoder;
+               struct drm_display_mode *mode, *adjusted_mode;
+
+               connector = old_state->connectors[i];
+
+               if (!connector || !connector->state->best_encoder)
+                       continue;
+
+               encoder = connector->state->best_encoder;
+               funcs = encoder->helper_private;
+               new_crtc_state = connector->state->crtc->state;
+               mode = &new_crtc_state->mode;
+               adjusted_mode = &new_crtc_state->adjusted_mode;
+
+               /*
+                * Each encoder has at most one connector (since we always steal
+                * it away), so we won't call mode_set hooks twice.
+                */
+               funcs->mode_set(encoder, mode, adjusted_mode);
+
+               if (encoder->bridge && encoder->bridge->funcs->mode_set)
+                       encoder->bridge->funcs->mode_set(encoder->bridge,
+                                                        mode, adjusted_mode);
+       }
+}
+
+/**
+ * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function commits the modeset changes that need to be committed before
+ * updating planes. It shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ */
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+                                        struct drm_atomic_state *state)
+{
+       disable_outputs(dev, state);
+       set_routing_links(dev, state);
+       crtc_set_mode(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+
+/**
+ * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the modeset changes that need to be committed after
+ * updating planes: It enables all the outputs with the new configuration which
+ * had to be turned off for the update.
+ */
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+                                         struct drm_atomic_state *old_state)
+{
+       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       int i;
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc;
+
+               crtc = old_state->crtcs[i];
+
+               /* Need to filter out CRTCs where only planes change. */
+               if (!crtc || !crtc->state->mode_changed)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               if (crtc->state->enable)
+                       funcs->commit(crtc);
+       }
+
+       for (i = 0; i < old_state->num_connector; i++) {
+               struct drm_connector *connector;
+               struct drm_encoder_helper_funcs *funcs;
+               struct drm_encoder *encoder;
+
+               connector = old_state->connectors[i];
+
+               if (!connector || !connector->state->best_encoder)
+                       continue;
+
+               encoder = connector->state->best_encoder;
+               funcs = encoder->helper_private;
+
+               /*
+                * Each encoder has at most one connector (since we always steal
+                * it away), so we won't call enable hooks twice.
+                */
+               if (encoder->bridge)
+                       encoder->bridge->funcs->pre_enable(encoder->bridge);
+
+               funcs->commit(encoder);
+
+               if (encoder->bridge)
+                       encoder->bridge->funcs->enable(encoder->bridge);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+
+static void wait_for_fences(struct drm_device *dev,
+                           struct drm_atomic_state *state)
+{
+       int nplanes = dev->mode_config.num_total_plane;
+       int i;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane *plane = state->planes[i];
+
+               if (!plane || !plane->state->fence)
+                       continue;
+
+               WARN_ON(!plane->state->fb);
+
+               fence_wait(plane->state->fence, false);
+               fence_put(plane->state->fence);
+               plane->state->fence = NULL;
+       }
+}
+
+static bool framebuffer_changed(struct drm_device *dev,
+                               struct drm_atomic_state *old_state,
+                               struct drm_crtc *crtc)
+{
+       struct drm_plane *plane;
+       struct drm_plane_state *old_plane_state;
+       int nplanes = old_state->dev->mode_config.num_total_plane;
+       int i;
+
+       for (i = 0; i < nplanes; i++) {
+               plane = old_state->planes[i];
+               old_plane_state = old_state->plane_states[i];
+
+               if (!plane)
+                       continue;
+
+               if (plane->state->crtc != crtc &&
+                   old_plane_state->crtc != crtc)
+                       continue;
+
+               if (plane->state->fb != old_plane_state->fb)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * crtcs (i.e. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
+ * framebuffers have actually changed to optimize for the legacy cursor and
+ * plane update use-case.
+ */
+void
+drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+               struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       int i, ret;
+
+       for (i = 0; i < ncrtcs; i++) {
+               crtc = old_state->crtcs[i];
+               old_crtc_state = old_state->crtc_states[i];
+
+               if (!crtc)
+                       continue;
+
+               /* No one cares about the old state, so abuse it for tracking
+                * and store whether we hold a vblank reference (and should do a
+                * vblank wait) in the ->enable boolean. */
+               old_crtc_state->enable = false;
+
+               if (!crtc->state->enable)
+                       continue;
+
+               if (!framebuffer_changed(dev, old_state, crtc))
+                       continue;
+
+               ret = drm_crtc_vblank_get(crtc);
+               if (ret != 0)
+                       continue;
+
+               old_crtc_state->enable = true;
+               old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               crtc = old_state->crtcs[i];
+               old_crtc_state = old_state->crtc_states[i];
+
+               if (!crtc || !old_crtc_state->enable)
+                       continue;
+
+               ret = wait_event_timeout(dev->vblank[i].queue,
+                               old_crtc_state->last_vblank_count !=
+                                       drm_vblank_count(dev, i),
+                               msecs_to_jiffies(50));
+
+               drm_crtc_vblank_put(crtc);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. For
+ * now this doesn't implement asynchronous commits.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int drm_atomic_helper_commit(struct drm_device *dev,
+                            struct drm_atomic_state *state,
+                            bool async)
+{
+       int ret;
+
+       if (async)
+               return -EBUSY;
+
+       ret = drm_atomic_helper_prepare_planes(dev, state);
+       if (ret)
+               return ret;
+
+       /*
+        * This is the point of no return - everything below never fails except
+        * when the hw goes bonghits. Which means we can commit the new state on
+        * the software side now.
+        */
+
+       drm_atomic_helper_swap_state(dev, state);
+
+       /*
+        * Everything below can be run asynchronously without the need to grab
+        * any modeset locks at all under one condition: it must be guaranteed
+        * that the asynchronous work has either been cancelled (if the driver
+        * supports it, which at least requires that the framebuffers get
+        * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+        * before the new state gets committed on the software side with
+        * drm_atomic_helper_swap_state().
+        *
+        * This scheme allows new atomic state updates to be prepared and
+        * checked in parallel to the asynchronous completion of the previous
+        * update. Which is important since compositors need to figure out the
+        * composition of the next frame right after having submitted the
+        * current layout.
+        */
+
+       wait_for_fences(dev, state);
+
+       drm_atomic_helper_commit_pre_planes(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state);
+
+       drm_atomic_helper_commit_post_planes(dev, state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+
+       drm_atomic_state_free(state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit);
+
+/**
+ * DOC: implementing async commit
+ *
+ * For now the atomic helpers don't support async commit directly. If there is
+ * real need it could be added though, using the dma-buf fence infrastructure
+ * for generic synchronization with outstanding rendering.
+ *
+ * For now drivers have to implement async commit themselves, with the following
+ * sequence being the recommended one:
+ *
+ * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
+ * which commit needs to call which can fail, so we want to run it first and
+ * synchronously.
+ *
+ * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * might be affected by the new state update. This can be done by either cancelling
+ * or flushing the work items, depending upon whether the driver can deal with
+ * cancelled updates. Note that it is important to ensure that the framebuffer
+ * cleanup is still done when cancelling.
+ *
+ * For sufficient parallelism it is recommended to have a work item per crtc
+ * (for updates which don't touch global state) and a global one. Then we only
+ * need to synchronize with the crtc work items for changed crtcs and the global
+ * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ *
+ * 3. The software state is updated synchronously with
+ * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
+ * locks means concurrent callers never see inconsistent state. And doing this
+ * while it's guaranteed that no relevant async worker runs means that async
+ * workers do not need to grab any locks. Actually they must not grab locks, for
+ * otherwise the work flushing will deadlock.
+ *
+ * 4. Schedule a work item to do all subsequent steps, using the split-out
+ * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
+ * then cleaning up the framebuffers after the old framebuffer is no longer
+ * being displayed.
+ */
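A sketch of step 4 as a work item; the foo_* names are hypothetical and steps 1-3 are assumed to have been completed by whoever queued the work:

	struct foo_commit {
		struct work_struct work;
		struct drm_device *dev;
		struct drm_atomic_state *state;
	};

	static void foo_commit_work(struct work_struct *work)
	{
		struct foo_commit *commit =
			container_of(work, struct foo_commit, work);
		struct drm_device *dev = commit->dev;
		struct drm_atomic_state *state = commit->state;

		drm_atomic_helper_commit_pre_planes(dev, state);	/* a) */
		drm_atomic_helper_commit_planes(dev, state);		/* b) */
		drm_atomic_helper_commit_post_planes(dev, state);	/* c) */

		/* old framebuffers are off the screen after the next vblank */
		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
		drm_atomic_state_free(state);
		kfree(commit);
	}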
+
+/**
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
+ * @dev: DRM device
+ * @state: atomic state object with new state structures
+ *
+ * This function prepares plane state, specifically framebuffers, for the new
+ * configuration. If any failure is encountered this function will call
+ * ->cleanup_fb on any already successfully prepared framebuffer.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+                                    struct drm_atomic_state *state)
+{
+       int nplanes = dev->mode_config.num_total_plane;
+       int ret, i;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane_helper_funcs *funcs;
+               struct drm_plane *plane = state->planes[i];
+               struct drm_framebuffer *fb;
+
+               if (!plane)
+                       continue;
+
+               funcs = plane->helper_private;
+
+               fb = state->plane_states[i]->fb;
+
+               if (fb && funcs->prepare_fb) {
+                       ret = funcs->prepare_fb(plane, fb);
+                       if (ret)
+                               goto fail;
+               }
+       }
+
+       return 0;
+
+fail:
+       for (i--; i >= 0; i--) {
+               struct drm_plane_helper_funcs *funcs;
+               struct drm_plane *plane = state->planes[i];
+               struct drm_framebuffer *fb;
+
+               if (!plane)
+                       continue;
+
+               funcs = plane->helper_private;
+
+               fb = state->plane_states[i]->fb;
+
+               if (fb && funcs->cleanup_fb)
+                       funcs->cleanup_fb(plane, fb);
+
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+
+/**
+ * drm_atomic_helper_commit_planes - commit plane state
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the new plane state using the plane and atomic helper
+ * functions for planes and crtcs. It assumes that the atomic state has already
+ * been pushed into the relevant object state pointers, since this step can no
+ * longer fail.
+ *
+ * It still requires the global state object @old_state to know which planes and
+ * crtcs need to be updated though.
+ */
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+                                    struct drm_atomic_state *old_state)
+{
+       int nplanes = dev->mode_config.num_total_plane;
+       int ncrtcs = dev->mode_config.num_crtc;
+       int i;
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc = old_state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               if (!funcs || !funcs->atomic_begin)
+                       continue;
+
+               funcs->atomic_begin(crtc);
+       }
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane_helper_funcs *funcs;
+               struct drm_plane *plane = old_state->planes[i];
+               struct drm_plane_state *old_plane_state;
+
+               if (!plane)
+                       continue;
+
+               funcs = plane->helper_private;
+
+               if (!funcs || !funcs->atomic_update)
+                       continue;
+
+               old_plane_state = old_state->plane_states[i];
+
+               funcs->atomic_update(plane, old_plane_state);
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc_helper_funcs *funcs;
+               struct drm_crtc *crtc = old_state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               funcs = crtc->helper_private;
+
+               if (!funcs || !funcs->atomic_flush)
+                       continue;
+
+               funcs->atomic_flush(crtc);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
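To make the begin/update/flush ordering concrete, here is a hypothetical set of hooks (foo_ names again invented) in the order this function calls them; the plane hook would be added to the foo_plane_helper_funcs sketched earlier as .atomic_update:

static void foo_crtc_atomic_begin(struct drm_crtc *crtc)
{
	/* e.g. block double-buffered register updates for this crtc */
}

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* program plane->state (already the new state) into the hardware */
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc)
{
	/* e.g. unblock register updates and arm them for the next vblank */
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.atomic_begin	= foo_crtc_atomic_begin,
	.atomic_flush	= foo_crtc_atomic_flush,
};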
+
+/**
+ * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the old
+ * configuration. Hence the old configuration must be preserved in @old_state to
+ * be able to call this function.
+ *
+ * This function must also be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes().
+ */
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+                                     struct drm_atomic_state *old_state)
+{
+       int nplanes = dev->mode_config.num_total_plane;
+       int i;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane_helper_funcs *funcs;
+               struct drm_plane *plane = old_state->planes[i];
+               struct drm_framebuffer *old_fb;
+
+               if (!plane)
+                       continue;
+
+               funcs = plane->helper_private;
+
+               old_fb = old_state->plane_states[i]->fb;
+
+               if (old_fb && funcs->cleanup_fb)
+                       funcs->cleanup_fb(plane, old_fb);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
+
+/**
+ * drm_atomic_helper_swap_state - store atomic state into current sw state
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function stores the atomic state into the current state pointers in all
+ * driver objects. It should be called after all potentially failing steps have
+ * succeeded, but before the actual hardware state is committed.
+ *
+ * For cleanup and error recovery the current state for all changed objects will
+ * be swapped into @state.
+ *
+ * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
+ *
+ * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
+ *
+ * 2. Do any other steps that might fail.
+ *
+ * 3. Put the staged state into the current state pointers with this function.
+ *
+ * 4. Actually commit the hardware state.
+ *
+ * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
+ * contains the old state. Also do any other cleanup required with that state.
+ */
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+                                 struct drm_atomic_state *state)
+{
+       int i;
+
+       for (i = 0; i < dev->mode_config.num_connector; i++) {
+               struct drm_connector *connector = state->connectors[i];
+
+               if (!connector)
+                       continue;
+
+               connector->state->state = state;
+               swap(state->connector_states[i], connector->state);
+               connector->state->state = NULL;
+       }
+
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               crtc->state->state = state;
+               swap(state->crtc_states[i], crtc->state);
+               crtc->state->state = NULL;
+       }
+
+       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+               struct drm_plane *plane = state->planes[i];
+
+               if (!plane)
+                       continue;
+
+               plane->state->state = state;
+               swap(state->plane_states[i], plane->state);
+               plane->state->state = NULL;
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_swap_state);
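A minimal sketch of steps 1-5 above as a synchronous ->atomic_commit implementation (sketch_atomic_commit is an invented name; a real implementation also covers the modeset side via the pre-/post-plane commit helpers and the async worker scheme described earlier):

static int sketch_atomic_commit(struct drm_device *dev,
				struct drm_atomic_state *state,
				bool async)
{
	int ret;

	if (async)
		return -EBUSY;	/* would need the worker infrastructure */

	/* Step 1: pin the new framebuffers; this step may still fail. */
	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Step 3: point of no return, publish the new software state. */
	drm_atomic_helper_swap_state(dev, state);

	/* Step 4: program the hardware; @state now holds the old state. */
	drm_atomic_helper_commit_planes(dev, state);

	/* Step 5: release the old framebuffers. */
	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);
	return 0;
}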
+
+/**
+ * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+                                  struct drm_crtc *crtc,
+                                  struct drm_framebuffer *fb,
+                                  int crtc_x, int crtc_y,
+                                  unsigned int crtc_w, unsigned int crtc_h,
+                                  uint32_t src_x, uint32_t src_y,
+                                  uint32_t src_w, uint32_t src_h)
+{
+       struct drm_atomic_state *state;
+       struct drm_plane_state *plane_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+       plane_state = drm_atomic_get_plane_state(state, plane);
+       if (IS_ERR(plane_state)) {
+               ret = PTR_ERR(plane_state);
+               goto fail;
+       }
+
+       ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+       if (ret != 0)
+               goto fail;
+       drm_atomic_set_fb_for_plane(plane_state, fb);
+       plane_state->crtc_x = crtc_x;
+       plane_state->crtc_y = crtc_y;
+       plane_state->crtc_h = crtc_h;
+       plane_state->crtc_w = crtc_w;
+       plane_state->src_x = src_x;
+       plane_state->src_y = src_y;
+       plane_state->src_h = src_h;
+       plane_state->src_w = src_w;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       /*
+        * Someone might have exchanged the framebuffer while we dropped locks
+        * in the backoff code. We need to fix up the fb refcount tracking the
+        * core does for us.
+        */
+       plane->old_fb = plane->fb;
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_plane);
+
+/**
+ * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_disable_plane(struct drm_plane *plane)
+{
+       struct drm_atomic_state *state;
+       struct drm_plane_state *plane_state;
+       int ret = 0;
+
+       /*
+        * FIXME: Without plane->crtc set we can't get at the implicit legacy
+        * acquire context. The real fix will be to wire the acquire ctx through
+        * everywhere we need it, but meanwhile prevent chaos by just skipping
+        * this noop. The critical case is the cursor ioctls which a) only grab
+        * crtc/cursor-plane locks (so we need the crtc to get at the right
+        * acquire context) and b) can try to disable the plane multiple times.
+        */
+       if (!plane->crtc)
+               return 0;
+
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
+retry:
+       plane_state = drm_atomic_get_plane_state(state, plane);
+       if (IS_ERR(plane_state)) {
+               ret = PTR_ERR(plane_state);
+               goto fail;
+       }
+
+       ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
+       if (ret != 0)
+               goto fail;
+       drm_atomic_set_fb_for_plane(plane_state, NULL);
+       plane_state->crtc_x = 0;
+       plane_state->crtc_y = 0;
+       plane_state->crtc_h = 0;
+       plane_state->crtc_w = 0;
+       plane_state->src_x = 0;
+       plane_state->src_y = 0;
+       plane_state->src_h = 0;
+       plane_state->src_w = 0;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       /*
+        * Someone might have exchanged the framebuffer while we dropped locks
+        * in the backoff code. We need to fix up the fb refcount tracking the
+        * core does for us.
+        */
+       plane->old_fb = plane->fb;
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
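Usage sketch: an atomic driver wires both helpers straight into its legacy plane vfuncs (foo_ name invented; the set_property and state hooks referenced here are the helpers documented further below):

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.set_property		= drm_atomic_helper_plane_set_property,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};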
+
+static int update_output_state(struct drm_atomic_state *state,
+                              struct drm_mode_set *set)
+{
+       struct drm_device *dev = set->crtc->dev;
+       struct drm_connector_state *conn_state;
+       int ncrtcs = state->dev->mode_config.num_crtc;
+       int ret, i, j;
+
+       ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+                              state->acquire_ctx);
+       if (ret)
+               return ret;
+
+       /* First grab all affected connector/crtc states. */
+       for (i = 0; i < set->num_connectors; i++) {
+               conn_state = drm_atomic_get_connector_state(state,
+                                                           set->connectors[i]);
+               if (IS_ERR(conn_state))
+                       return PTR_ERR(conn_state);
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+
+               if (!crtc)
+                       continue;
+
+               ret = drm_atomic_add_affected_connectors(state, crtc);
+               if (ret)
+                       return ret;
+       }
+
+       /* Then recompute connector->crtc links and crtc enabling state. */
+       for (i = 0; i < state->num_connector; i++) {
+               struct drm_connector *connector;
+
+               connector = state->connectors[i];
+               conn_state = state->connector_states[i];
+
+               if (!connector)
+                       continue;
+
+               if (conn_state->crtc == set->crtc) {
+                       ret = drm_atomic_set_crtc_for_connector(conn_state,
+                                                               NULL);
+                       if (ret)
+                               return ret;
+               }
+
+               for (j = 0; j < set->num_connectors; j++) {
+                       if (set->connectors[j] == connector) {
+                               ret = drm_atomic_set_crtc_for_connector(conn_state,
+                                                                       set->crtc);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+               }
+       }
+
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+               struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+               if (!crtc)
+                       continue;
+
+               /* Don't update ->enable for the CRTC in the set_config request,
+                * since a mismatch would indicate a bug in the upper layers.
+                * The actual modeset code later on will catch any
+                * inconsistencies here. */
+               if (crtc == set->crtc)
+                       continue;
+
+               crtc_state->enable =
+                       drm_atomic_connectors_for_crtc(state, crtc);
+       }
+
+       return 0;
+}
+
+/**
+ * drm_atomic_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * Provides a default crtc set_config handler using the atomic driver interface.
+ *
+ * Returns:
+ * 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_set_config(struct drm_mode_set *set)
+{
+       struct drm_atomic_state *state;
+       struct drm_crtc *crtc = set->crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane_state *primary_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(crtc->dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
+
+       primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+       if (IS_ERR(primary_state)) {
+               ret = PTR_ERR(primary_state);
+               goto fail;
+       }
+
+       if (!set->mode) {
+               WARN_ON(set->fb);
+               WARN_ON(set->num_connectors);
+
+               crtc_state->enable = false;
+
+               ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
+               if (ret != 0)
+                       goto fail;
+
+               drm_atomic_set_fb_for_plane(primary_state, NULL);
+
+               goto commit;
+       }
+
+       WARN_ON(!set->fb);
+       WARN_ON(!set->num_connectors);
+
+       crtc_state->enable = true;
+       drm_mode_copy(&crtc_state->mode, set->mode);
+
+       ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
+       if (ret != 0)
+               goto fail;
+       drm_atomic_set_fb_for_plane(primary_state, set->fb);
+       primary_state->crtc_x = 0;
+       primary_state->crtc_y = 0;
+       primary_state->crtc_h = set->mode->vdisplay;
+       primary_state->crtc_w = set->mode->hdisplay;
+       primary_state->src_x = set->x << 16;
+       primary_state->src_y = set->y << 16;
+       primary_state->src_h = set->mode->vdisplay << 16;
+       primary_state->src_w = set->mode->hdisplay << 16;
+
+commit:
+       ret = update_output_state(state, set);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       /*
+        * Someone might have exchanged the framebuffer while we dropped locks
+        * in the backoff code. We need to fix up the fb refcount tracking the
+        * core does for us.
+        */
+       crtc->primary->old_fb = crtc->primary->fb;
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
+
+/**
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
+ * @crtc: DRM crtc
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default crtc set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+                                   struct drm_property *property,
+                                   uint64_t val)
+{
+       struct drm_atomic_state *state;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(crtc->dev);
+       if (!state)
+               return -ENOMEM;
+
+       /* ->set_property is always called with all locks held. */
+       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
+
+       ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
+                                              property, val);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
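This helper (and its plane/connector siblings below) relies on the driver implementing ->atomic_set_property to decode its driver-specific properties into the state object. A hypothetical sketch, with an invented subclassed state and property:

struct foo_crtc_state {
	struct drm_crtc_state base;	/* must stay the first member */
	unsigned int dither;
};

static struct drm_property *foo_dither_property;	/* invented property */

static int foo_crtc_atomic_set_property(struct drm_crtc *crtc,
					struct drm_crtc_state *state,
					struct drm_property *property,
					uint64_t val)
{
	struct foo_crtc_state *foo_state =
		container_of(state, struct foo_crtc_state, base);

	if (property == foo_dither_property) {
		foo_state->dither = val;
		return 0;
	}

	return -EINVAL;	/* unknown property */
}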
+
+/**
+ * drm_atomic_helper_plane_set_property - helper for plane properties
+ * @plane: DRM plane
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default plane set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+                                   struct drm_property *property,
+                                   uint64_t val)
+{
+       struct drm_atomic_state *state;
+       struct drm_plane_state *plane_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state)
+               return -ENOMEM;
+
+       /* ->set_property is always called with all locks held. */
+       state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
+retry:
+       plane_state = drm_atomic_get_plane_state(state, plane);
+       if (IS_ERR(plane_state)) {
+               ret = PTR_ERR(plane_state);
+               goto fail;
+       }
+
+       ret = plane->funcs->atomic_set_property(plane, plane_state,
+                                              property, val);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
+
+/**
+ * drm_atomic_helper_connector_set_property - helper for connector properties
+ * @connector: DRM connector
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default connector set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+                                   struct drm_property *property,
+                                   uint64_t val)
+{
+       struct drm_atomic_state *state;
+       struct drm_connector_state *connector_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(connector->dev);
+       if (!state)
+               return -ENOMEM;
+
+       /* ->set_property is always called with all locks held. */
+       state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
+retry:
+       connector_state = drm_atomic_get_connector_state(state, connector);
+       if (IS_ERR(connector_state)) {
+               ret = PTR_ERR(connector_state);
+               goto fail;
+       }
+
+       ret = connector->funcs->atomic_set_property(connector, connector_state,
+                                              property, val);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
+
+/**
+ * drm_atomic_helper_page_flip - execute a legacy page flip
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ *
+ * Provides a default page flip implementation using the atomic driver interface.
+ *
+ * Note that for now so-called async page flips (i.e. updates which are not
+ * synchronized to vblank) are not supported, since the atomic interfaces have
+ * no provisions for this yet.
+ *
+ * Returns:
+ * 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+                               struct drm_framebuffer *fb,
+                               struct drm_pending_vblank_event *event,
+                               uint32_t flags)
+{
+       struct drm_plane *plane = crtc->primary;
+       struct drm_atomic_state *state;
+       struct drm_plane_state *plane_state;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
+
+       if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+               return -EINVAL;
+
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
+       crtc_state->event = event;
+
+       plane_state = drm_atomic_get_plane_state(state, plane);
+       if (IS_ERR(plane_state)) {
+               ret = PTR_ERR(plane_state);
+               goto fail;
+       }
+
+       ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+       if (ret != 0)
+               goto fail;
+       drm_atomic_set_fb_for_plane(plane_state, fb);
+
+       ret = drm_atomic_async_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       /* TODO: ->page_flip is the only driver callback where the core
+        * doesn't update plane->fb. For now patch it up here. */
+       plane->fb = plane->state->fb;
+
+       /* Driver takes ownership of state on successful async commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       /*
+        * Someone might have exchanged the framebuffer while we dropped locks
+        * in the backoff code. We need to fix up the fb refcount tracking the
+        * core does for us.
+        */
+       plane->old_fb = plane->fb;
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
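Together with drm_atomic_helper_set_config() above, the legacy CRTC entry points of an atomic driver then collapse into a table of helpers (foo_ name invented; the state hooks are the defaults documented in the next section):

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.set_property		= drm_atomic_helper_crtc_set_property,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};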
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available, which is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ */
+
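For instance, a driver without hardware read-out might do something along these lines at the end of its load (and resume) path; foo_load and foo_create_outputs are invented stand-ins:

static int foo_load(struct drm_device *dev)
{
	int ret;

	/* Hypothetical setup of crtcs/planes/connectors, all registered
	 * with the helper ->reset hooks below. */
	ret = foo_create_outputs(dev);
	if (ret)
		return ret;

	/* Calls every object's ->reset hook, leaving behind a consistent,
	 * everything-off software state. */
	drm_mode_config_reset(dev);

	return 0;
}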
+/**
+ * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+       kfree(crtc->state);
+       crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+       struct drm_crtc_state *state;
+
+       if (WARN_ON(!crtc->state))
+               return NULL;
+
+       state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
+
+       if (state) {
+               state->mode_changed = false;
+               state->planes_changed = false;
+               state->event = NULL;
+       }
+
+       return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
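A driver that does subclass the CRTC state would follow the same pattern in its own hook; a sketch reusing the invented foo_crtc_state from earlier, assuming the driver's ->reset hook also allocates the subclassed size:

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	/* Valid only if every state behind crtc->state really is a
	 * struct foo_crtc_state with base as its first member. */
	state = kmemdup(crtc->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->base.mode_changed = false;
	state->base.planes_changed = false;
	state->base.event = NULL;

	return &state->base;
}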
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+                                         struct drm_crtc_state *state)
+{
+       kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+       if (plane->state && plane->state->fb)
+               drm_framebuffer_unreference(plane->state->fb);
+
+       kfree(plane->state);
+       plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+       struct drm_plane_state *state;
+
+       if (WARN_ON(!plane->state))
+               return NULL;
+
+       state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
+
+       if (state && state->fb)
+               drm_framebuffer_reference(state->fb);
+
+       return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+                                          struct drm_plane_state *state)
+{
+       if (state->fb)
+               drm_framebuffer_unreference(state->fb);
+
+       kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_reset - default ->reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+       kfree(connector->state);
+       connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+       if (WARN_ON(!connector->state))
+               return NULL;
+
+       return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+                                         struct drm_connector_state *state)
+{
+       kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
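Analogous wiring for a connector (foo_ name invented; .detect and .fill_modes are omitted since they are inherently driver-specific):

static const struct drm_connector_funcs foo_connector_funcs = {
	.set_property		= drm_atomic_helper_connector_set_property,
	.destroy		= drm_connector_cleanup,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};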
index e79c8d3700d838ff87f1095bcb92a773dbe36018..de79283eaea720ba0dc49cab73060a8dfee3d3ca 100644 (file)
@@ -683,7 +683,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        drm_modeset_lock_init(&crtc->mutex);
        ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
        if (ret)
-               goto out;
+               return ret;
 
        crtc->base.properties = &crtc->properties;
 
@@ -697,9 +697,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        if (cursor)
                cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
- out:
-
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(drm_crtc_init_with_planes);
 
@@ -723,6 +721,10 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
        drm_mode_object_put(dev, &crtc->base);
        list_del(&crtc->head);
        dev->mode_config.num_crtc--;
+
+       WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
+       if (crtc->state && crtc->funcs->atomic_destroy_state)
+               crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
@@ -766,7 +768,6 @@ static void drm_mode_remove(struct drm_connector *connector,
 /**
  * drm_connector_get_cmdline_mode - reads the user's cmdline mode
  * @connector: connector to query
- * @mode: returned mode
  *
  * The kernel supports per-connector configuration of its consoles through
  * use of the video= parameter. This function parses that option and
@@ -870,6 +871,8 @@ int drm_connector_init(struct drm_device *dev,
 
        drm_connector_get_cmdline_mode(connector);
 
+       /* We should add connectors at the end to avoid upsetting the connector
+        * index too much. */
        list_add_tail(&connector->head, &dev->mode_config.connector_list);
        dev->mode_config.num_connector++;
 
@@ -919,6 +922,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
        connector->name = NULL;
        list_del(&connector->head);
        dev->mode_config.num_connector--;
+
+       WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+       if (connector->state && connector->funcs->atomic_destroy_state)
+               connector->funcs->atomic_destroy_state(connector,
+                                                      connector->state);
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
 
@@ -933,6 +941,9 @@ unsigned int drm_connector_index(struct drm_connector *connector)
 {
        unsigned int index = 0;
        struct drm_connector *tmp;
+       struct drm_mode_config *config = &connector->dev->mode_config;
+
+       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
        list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
                if (tmp == connector)
@@ -1153,11 +1164,11 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 {
        int ret;
 
-       drm_modeset_lock_all(dev);
-
        ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
        if (ret)
-               goto out;
+               return ret;
+
+       drm_modeset_lock_init(&plane->mutex);
 
        plane->base.properties = &plane->properties;
        plane->dev = dev;
@@ -1167,8 +1178,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
        if (!plane->format_types) {
                DRM_DEBUG_KMS("out of memory when allocating plane\n");
                drm_mode_object_put(dev, &plane->base);
-               ret = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
 
        memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
@@ -1185,10 +1195,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
                                   dev->mode_config.plane_type_property,
                                   plane->type);
 
- out:
-       drm_modeset_unlock_all(dev);
-
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(drm_universal_plane_init);
 
@@ -1246,6 +1253,10 @@ void drm_plane_cleanup(struct drm_plane *plane)
        if (plane->type == DRM_PLANE_TYPE_OVERLAY)
                dev->mode_config.num_overlay_plane--;
        drm_modeset_unlock_all(dev);
+
+       WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+       if (plane->state && plane->funcs->atomic_destroy_state)
+               plane->funcs->atomic_destroy_state(plane, plane->state);
 }
 EXPORT_SYMBOL(drm_plane_cleanup);
 
@@ -1388,12 +1399,13 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
  * responsible for allocating a list of format names and passing them to
  * this routine.
  */
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+int drm_mode_create_tv_properties(struct drm_device *dev,
+                                 unsigned int num_modes,
                                  char *modes[])
 {
        struct drm_property *tv_selector;
        struct drm_property *tv_subconnector;
-       int i;
+       unsigned int i;
 
        if (dev->mode_config.tv_select_subconnector_property)
                return 0;
@@ -1491,7 +1503,7 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
  * connectors.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
 {
@@ -1535,6 +1547,30 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+       if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+               return 0;
+
+       dev->mode_config.suggested_x_property =
+               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+       dev->mode_config.suggested_y_property =
+               drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+       if (dev->mode_config.suggested_x_property == NULL ||
+           dev->mode_config.suggested_y_property == NULL)
+               return -ENOMEM;
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
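A sketch of the intended use (foo_ wrapper and zero offsets purely illustrative): create the two properties once per device, then attach them to each connector that should expose a preferred position.

static int foo_attach_suggested_offsets(struct drm_device *dev,
					struct drm_connector *connector)
{
	int ret;

	ret = drm_mode_create_suggested_offset_properties(dev);
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);

	return 0;
}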
+
 static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
        uint32_t total_objects = 0;
@@ -1651,7 +1687,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
  * the caller.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 static int drm_crtc_convert_umode(struct drm_display_mode *out,
                                  const struct drm_mode_modeinfo *in)
@@ -1694,7 +1730,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getresources(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
@@ -1745,7 +1781,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        card_res->count_fbs = fb_count;
        mutex_unlock(&file_priv->fbs_lock);
 
-       drm_modeset_lock_all(dev);
+       /* mode_config.mutex protects the connector list against e.g. DP MST
+        * connector hot-adding. CRTC/Plane lists are invariant. */
+       mutex_lock(&dev->mode_config.mutex);
        if (!drm_is_primary_client(file_priv)) {
 
                mode_group = NULL;
@@ -1865,7 +1903,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
                  card_res->count_connectors, card_res->count_encoders);
 
 out:
-       drm_modeset_unlock_all(dev);
+       mutex_unlock(&dev->mode_config.mutex);
        return ret;
 }
 
@@ -1880,26 +1918,22 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getcrtc(struct drm_device *dev,
                     void *data, struct drm_file *file_priv)
 {
        struct drm_mode_crtc *crtc_resp = data;
        struct drm_crtc *crtc;
-       int ret = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
-
        crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
-       if (!crtc) {
-               ret = -ENOENT;
-               goto out;
-       }
+       if (!crtc)
+               return -ENOENT;
 
+       drm_modeset_lock_crtc(crtc, crtc->primary);
        crtc_resp->x = crtc->x;
        crtc_resp->y = crtc->y;
        crtc_resp->gamma_size = crtc->gamma_size;
@@ -1916,10 +1950,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
        } else {
                crtc_resp->mode_valid = 0;
        }
+       drm_modeset_unlock_crtc(crtc);
 
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return 0;
 }
 
 static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
@@ -1935,6 +1968,15 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
        return true;
 }
 
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+       /* For atomic drivers only state objects are synchronously updated and
+        * protected by modeset locks, so check those first. */
+       if (connector->state)
+               return connector->state->best_encoder;
+       return connector->encoder;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1946,13 +1988,14 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getconnector(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
        struct drm_mode_get_connector *out_resp = data;
        struct drm_connector *connector;
+       struct drm_encoder *encoder;
        struct drm_display_mode *mode;
        int mode_count = 0;
        int props_count = 0;
@@ -2008,8 +2051,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        out_resp->subpixel = connector->display_info.subpixel_order;
        out_resp->connection = connector->status;
        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       if (connector->encoder)
-               out_resp->encoder_id = connector->encoder->base.id;
+
+       encoder = drm_connector_get_encoder(connector);
+       if (encoder)
+               out_resp->encoder_id = encoder->base.id;
        else
                out_resp->encoder_id = 0;
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2079,6 +2124,33 @@ out:
        return ret;
 }
 
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+       struct drm_connector *connector;
+       struct drm_device *dev = encoder->dev;
+       bool uses_atomic = false;
+
+       /* For atomic drivers only state objects are synchronously updated and
+        * protected by modeset locks, so check those first. */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->state)
+                       continue;
+
+               uses_atomic = true;
+
+               if (connector->state->best_encoder != encoder)
+                       continue;
+
+               return connector->state->crtc;
+       }
+
+       /* Don't return stale data (e.g. pending async disable). */
+       if (uses_atomic)
+               return NULL;
+
+       return encoder->crtc;
+}
+
 /**
  * drm_mode_getencoder - get encoder configuration
  * @dev: drm device for the ioctl
@@ -2090,37 +2162,38 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getencoder(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
        struct drm_mode_get_encoder *enc_resp = data;
        struct drm_encoder *encoder;
-       int ret = 0;
+       struct drm_crtc *crtc;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
        encoder = drm_encoder_find(dev, enc_resp->encoder_id);
-       if (!encoder) {
-               ret = -ENOENT;
-               goto out;
-       }
+       if (!encoder)
+               return -ENOENT;
 
-       if (encoder->crtc)
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       crtc = drm_encoder_get_crtc(encoder);
+       if (crtc)
+               enc_resp->crtc_id = crtc->base.id;
+       else if (encoder->crtc)
                enc_resp->crtc_id = encoder->crtc->base.id;
        else
                enc_resp->crtc_id = 0;
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
        enc_resp->encoder_type = encoder->encoder_type;
        enc_resp->encoder_id = encoder->base.id;
        enc_resp->possible_crtcs = encoder->possible_crtcs;
        enc_resp->possible_clones = encoder->possible_clones;
 
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return 0;
 }
 
 /**
@@ -2134,7 +2207,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getplane_res(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
@@ -2143,13 +2216,12 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
        struct drm_mode_config *config;
        struct drm_plane *plane;
        uint32_t __user *plane_ptr;
-       int copied = 0, ret = 0;
+       int copied = 0;
        unsigned num_planes;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
        config = &dev->mode_config;
 
        if (file_priv->universal_planes)
@@ -2165,6 +2237,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
            (plane_resp->count_planes >= num_planes)) {
                plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
+               /* Plane lists are invariant, no locking needed. */
                list_for_each_entry(plane, &config->plane_list, head) {
                        /*
                         * Unless userspace set the 'universal planes'
@@ -2174,18 +2247,14 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
                            !file_priv->universal_planes)
                                continue;
 
-                       if (put_user(plane->base.id, plane_ptr + copied)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
+                       if (put_user(plane->base.id, plane_ptr + copied))
+                               return -EFAULT;
                        copied++;
                }
        }
        plane_resp->count_planes = num_planes;
 
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return 0;
 }
 
 /**
@@ -2199,7 +2268,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getplane(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
@@ -2207,18 +2276,15 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
        struct drm_mode_get_plane *plane_resp = data;
        struct drm_plane *plane;
        uint32_t __user *format_ptr;
-       int ret = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
        plane = drm_plane_find(dev, plane_resp->plane_id);
-       if (!plane) {
-               ret = -ENOENT;
-               goto out;
-       }
+       if (!plane)
+               return -ENOENT;
 
+       drm_modeset_lock(&plane->mutex, NULL);
        if (plane->crtc)
                plane_resp->crtc_id = plane->crtc->base.id;
        else
@@ -2228,6 +2294,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
                plane_resp->fb_id = plane->fb->base.id;
        else
                plane_resp->fb_id = 0;
+       drm_modeset_unlock(&plane->mutex);
 
        plane_resp->plane_id = plane->base.id;
        plane_resp->possible_crtcs = plane->possible_crtcs;
@@ -2243,15 +2310,12 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
                if (copy_to_user(format_ptr,
                                 plane->format_types,
                                 sizeof(uint32_t) * plane->format_count)) {
-                       ret = -EFAULT;
-                       goto out;
+                       return -EFAULT;
                }
        }
        plane_resp->count_format_types = plane->format_count;
 
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return 0;
 }
 
 /*
@@ -2274,7 +2338,7 @@ static int __setplane_internal(struct drm_plane *plane,
 {
        int ret = 0;
        unsigned int fb_width, fb_height;
-       int i;
+       unsigned int i;
 
        /* No fb means shut it down */
        if (!fb) {
@@ -2378,13 +2442,12 @@ static int setplane_internal(struct drm_plane *plane,
  * valid crtc).
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_setplane(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
 {
        struct drm_mode_set_plane *plane_req = data;
-       struct drm_mode_object *obj;
        struct drm_plane *plane;
        struct drm_crtc *crtc = NULL;
        struct drm_framebuffer *fb = NULL;
@@ -2407,14 +2470,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
         * First, find the plane, crtc, and fb objects.  If not available,
         * we don't bother to call the driver.
         */
-       obj = drm_mode_object_find(dev, plane_req->plane_id,
-                                  DRM_MODE_OBJECT_PLANE);
-       if (!obj) {
+       plane = drm_plane_find(dev, plane_req->plane_id);
+       if (!plane) {
                DRM_DEBUG_KMS("Unknown plane ID %d\n",
                              plane_req->plane_id);
                return -ENOENT;
        }
-       plane = obj_to_plane(obj);
 
        if (plane_req->fb_id) {
                fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
@@ -2424,14 +2485,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
                        return -ENOENT;
                }
 
-               obj = drm_mode_object_find(dev, plane_req->crtc_id,
-                                          DRM_MODE_OBJECT_CRTC);
-               if (!obj) {
+               crtc = drm_crtc_find(dev, plane_req->crtc_id);
+               if (!crtc) {
                        DRM_DEBUG_KMS("Unknown crtc ID %d\n",
                                      plane_req->crtc_id);
                        return -ENOENT;
                }
-               crtc = obj_to_crtc(obj);
        }
 
        /*
@@ -2453,7 +2512,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
  * interface. The only thing it adds is the correct refcounting dance.
  * 
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
@@ -2546,7 +2605,7 @@ EXPORT_SYMBOL(drm_crtc_check_viewport);
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_setcrtc(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
@@ -2709,7 +2768,7 @@ out:
  * userspace wants to make use of these capabilities.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                                     struct drm_mode_cursor2 *req,
@@ -2810,7 +2869,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
         * If this crtc has a universal cursor plane, call that plane's update
         * handler rather than using legacy cursor handlers.
         */
-       drm_modeset_lock_crtc(crtc);
+       drm_modeset_lock_crtc(crtc, crtc->cursor);
        if (crtc->cursor) {
                ret = drm_mode_cursor_universal(crtc, req, file_priv);
                goto out;
@@ -2857,7 +2916,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_cursor_ioctl(struct drm_device *dev,
                          void *data, struct drm_file *file_priv)
@@ -2884,7 +2943,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_cursor2_ioctl(struct drm_device *dev,
                           void *data, struct drm_file *file_priv)
@@ -2943,23 +3002,21 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
  * @file_priv: drm file for the ioctl call
  *
  * Add a new FB to the specified CRTC, given a user request. This is the
- * original addfb ioclt which only supported RGB formats.
+ * original addfb ioctl which only supported RGB formats.
  *
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_addfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv)
 {
        struct drm_mode_fb_cmd *or = data;
        struct drm_mode_fb_cmd2 r = {};
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_framebuffer *fb;
-       int ret = 0;
+       int ret;
 
-       /* Use new struct with format internally */
+       /* convert to new format and call new ioctl */
        r.fb_id = or->fb_id;
        r.width = or->width;
        r.height = or->height;
@@ -2967,28 +3024,13 @@ int drm_mode_addfb(struct drm_device *dev,
        r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
        r.handles[0] = or->handle;
 
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       if ((config->min_width > r.width) || (r.width > config->max_width))
-               return -EINVAL;
-
-       if ((config->min_height > r.height) || (r.height > config->max_height))
-               return -EINVAL;
-
-       fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
-       if (IS_ERR(fb)) {
-               DRM_DEBUG_KMS("could not create framebuffer\n");
-               return PTR_ERR(fb);
-       }
+       ret = drm_mode_addfb2(dev, &r, file_priv);
+       if (ret)
+               return ret;
 
-       mutex_lock(&file_priv->fbs_lock);
-       or->fb_id = fb->base.id;
-       list_add(&fb->filp_head, &file_priv->fbs);
-       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-       mutex_unlock(&file_priv->fbs_lock);
+       or->fb_id = r.fb_id;
 
-       return ret;
+       return 0;
 }
 
 static int format_check(const struct drm_mode_fb_cmd2 *r)
@@ -3080,7 +3122,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
        num_planes = drm_format_num_planes(r->pixel_format);
 
        if (r->width == 0 || r->width % hsub) {
-               DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+               DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
                return -EINVAL;
        }
 
@@ -3170,7 +3212,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_addfb2(struct drm_device *dev,
                    void *data, struct drm_file *file_priv)
@@ -3198,7 +3240,7 @@ int drm_mode_addfb2(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_rmfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv)
@@ -3252,7 +3294,7 @@ fail_lookup:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv)
@@ -3313,7 +3355,7 @@ int drm_mode_getfb(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
                           void *data, struct drm_file *file_priv)
@@ -3393,7 +3435,7 @@ out_err1:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 void drm_fb_release(struct drm_file *priv)
 {
@@ -3435,6 +3477,10 @@ void drm_fb_release(struct drm_file *priv)
  * object with drm_object_attach_property. The returned property object must be
  * freed with drm_property_destroy.
  *
+ * Note that the DRM core keeps a per-device list of properties and that, if
+ * drm_mode_config_cleanup() is called, it will destroy all properties created
+ * by the driver.
+ *
  * Returns:
  * A pointer to the newly created property on success, NULL on failure.
  */
@@ -3462,7 +3508,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 
        property->flags = flags;
        property->num_values = num_values;
-       INIT_LIST_HEAD(&property->enum_blob_list);
+       INIT_LIST_HEAD(&property->enum_list);
 
        if (name) {
                strncpy(property->name, name, DRM_PROP_NAME_LEN);
@@ -3611,7 +3657,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
  * object with drm_object_attach_property. The returned property object must be
  * freed with drm_property_destroy.
  *
- * Userspace is allowed to set any interger value in the (min, max) range
+ * Userspace is allowed to set any integer value in the (min, max) range
  * inclusive.
  *
  * Returns:
@@ -3684,8 +3730,8 @@ int drm_property_add_enum(struct drm_property *property, int index,
                        (value > 63))
                return -EINVAL;
 
-       if (!list_empty(&property->enum_blob_list)) {
-               list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+       if (!list_empty(&property->enum_list)) {
+               list_for_each_entry(prop_enum, &property->enum_list, head) {
                        if (prop_enum->value == value) {
                                strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
                                prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
@@ -3703,7 +3749,7 @@ int drm_property_add_enum(struct drm_property *property, int index,
        prop_enum->value = value;
 
        property->values[index] = value;
-       list_add_tail(&prop_enum->head, &property->enum_blob_list);
+       list_add_tail(&prop_enum->head, &property->enum_list);
        return 0;
 }
 EXPORT_SYMBOL(drm_property_add_enum);
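
The enum_list rename above is internal; the driver-facing way of building enum properties is unchanged. A minimal sketch, for a hypothetical "foo" driver, using the existing drm_property_create_enum() and drm_object_attach_property() API (which call drm_property_add_enum() internally):

        static const struct drm_prop_enum_list foo_scaling_modes[] = {
                { 0, "off" },
                { 1, "full" },
                { 2, "aspect" },
        };

        prop = drm_property_create_enum(dev, 0, "scaling mode",
                                        foo_scaling_modes,
                                        ARRAY_SIZE(foo_scaling_modes));
        if (!prop)
                return -ENOMEM;
        drm_object_attach_property(&connector->base, prop, 0);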
@@ -3720,7 +3766,7 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 {
        struct drm_property_enum *prop_enum, *pt;
 
-       list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
                list_del(&prop_enum->head);
                kfree(prop_enum);
        }
@@ -3823,17 +3869,20 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
 EXPORT_SYMBOL(drm_object_property_get_value);
 
 /**
- * drm_mode_getproperty_ioctl - get the current value of a connector's property
+ * drm_mode_getproperty_ioctl - get the property metadata
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
  *
- * This function retrieves the current value for an connectors's property.
+ * This function retrieves the metadata for a given property, like the different
+ * possible values for an enum property or the limits for a range property.
+ *
+ * Blob properties are special: their current value is not returned by this
+ * ioctl, userspace must use the dedicated get_blob ioctl to read it.
  *
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv)
@@ -3841,16 +3890,12 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        struct drm_mode_get_property *out_resp = data;
        struct drm_property *property;
        int enum_count = 0;
-       int blob_count = 0;
        int value_count = 0;
        int ret = 0, i;
        int copied;
        struct drm_property_enum *prop_enum;
        struct drm_mode_property_enum __user *enum_ptr;
-       struct drm_property_blob *prop_blob;
-       uint32_t __user *blob_id_ptr;
        uint64_t __user *values_ptr;
-       uint32_t __user *blob_length_ptr;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -3864,11 +3909,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 
        if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
                        drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-               list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+               list_for_each_entry(prop_enum, &property->enum_list, head)
                        enum_count++;
-       } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
-               list_for_each_entry(prop_blob, &property->enum_blob_list, head)
-                       blob_count++;
        }
 
        value_count = property->num_values;
@@ -3893,7 +3935,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
                if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
                        copied = 0;
                        enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
-                       list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+                       list_for_each_entry(prop_enum, &property->enum_list, head) {
 
                                if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
                                        ret = -EFAULT;
@@ -3911,35 +3953,24 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
                out_resp->count_enum_blobs = enum_count;
        }
 
-       if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
-               if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
-                       copied = 0;
-                       blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
-                       blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
-
-                       list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
-                               if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
-                                       ret = -EFAULT;
-                                       goto done;
-                               }
-
-                               if (put_user(prop_blob->length, blob_length_ptr + copied)) {
-                                       ret = -EFAULT;
-                                       goto done;
-                               }
-
-                               copied++;
-                       }
-               }
-               out_resp->count_enum_blobs = blob_count;
-       }
+       /*
+        * NOTE: The idea seems to have been to use this to read all the blob
+        * property values. But nothing ever added them to the corresponding
+        * list; userspace always used the special-purpose get_blob ioctl to
+        * read the value for a blob property. It also doesn't make a lot of
+        * sense to return values here when everything else is just metadata for
+        * the property itself.
+        */
+       if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+               out_resp->count_enum_blobs = 0;
 done:
        drm_modeset_unlock_all(dev);
        return ret;
 }
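
For context on the userspace side, the metadata returned here is normally consumed through libdrm's drmModeGetProperty(), which handles the two-pass count/allocate/fetch dance over this ioctl. A hedged sketch (fd and prop_id are assumed to be a valid DRM fd and property ID):

        drmModePropertyPtr prop = drmModeGetProperty(fd, prop_id);
        if (prop) {
                if (prop->flags & DRM_MODE_PROP_ENUM) {
                        for (int i = 0; i < prop->count_enums; i++)
                                printf("%s = %llu\n", prop->enums[i].name,
                                       (unsigned long long)prop->enums[i].value);
                }
                /* blob values are read separately, via drmModeGetPropertyBlob() */
                drmModeFreeProperty(prop);
        }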
 
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
-                                                         void *data)
+static struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+                        const void *data)
 {
        struct drm_property_blob *blob;
        int ret;
@@ -3985,7 +4016,7 @@ static void drm_property_destroy_blob(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_getblob_ioctl(struct drm_device *dev,
                           void *data, struct drm_file *file_priv)
@@ -4019,12 +4050,25 @@ done:
        return ret;
 }
 
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
 int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                        char *path)
+                                        const char *path)
 {
        struct drm_device *dev = connector->dev;
-       int ret, size;
-       size = strlen(path) + 1;
+       size_t size = strlen(path) + 1;
+       int ret;
 
        connector->path_blob_ptr = drm_property_create_blob(connector->dev,
                                                            size, path);
@@ -4047,13 +4091,14 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
  * connector's edid property.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                           struct edid *edid)
+                                           const struct edid *edid)
 {
        struct drm_device *dev = connector->dev;
-       int ret, size;
+       size_t size;
+       int ret;
 
        /* ignore requests to set edid when overridden */
        if (connector->override_edid)
@@ -4143,7 +4188,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file_priv)
@@ -4226,7 +4271,7 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
 EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
 
 /**
- * drm_mode_getproperty_ioctl - get the current value of a object's property
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
@@ -4238,7 +4283,7 @@ EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
@@ -4310,7 +4355,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
@@ -4382,7 +4427,7 @@ out:
  * possible_clones and possible_crtcs bitmasks.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_connector_attach_encoder(struct drm_connector *connector,
                                      struct drm_encoder *encoder)
@@ -4409,7 +4454,7 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
  * fixed gamma table size.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
                                 int gamma_size)
@@ -4438,7 +4483,7 @@ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv)
@@ -4510,7 +4555,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_gamma_get_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv)
@@ -4576,7 +4621,7 @@ out:
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_page_flip_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv)
@@ -4599,7 +4644,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        if (!crtc)
                return -ENOENT;
 
-       drm_modeset_lock_crtc(crtc);
+       drm_modeset_lock_crtc(crtc, crtc->primary);
        if (crtc->primary->fb == NULL) {
                /* The framebuffer is currently unbound, presumably
                 * due to a hotplug event, that userspace has not
@@ -4742,7 +4787,7 @@ EXPORT_SYMBOL(drm_mode_config_reset);
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv)
@@ -4769,6 +4814,16 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
        if (PAGE_ALIGN(size) == 0)
                return -EINVAL;
 
+       /*
+        * handle, pitch and size are output parameters. Zero them out to
+        * prevent drivers from accidentally using uninitialized data. Since
+        * not all existing userspace clears these fields properly, we cannot
+        * reject the IOCTL when they contain garbage.
+        */
+       args->handle = 0;
+       args->pitch = 0;
+       args->size = 0;
+
        return dev->driver->dumb_create(file_priv, dev, args);
 }
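
The zeroing matters because userspace treats handle, pitch and size as pure outputs of this ioctl. A minimal userspace sketch of the create-dumb flow (fd is an open DRM device, error handling elided):

        struct drm_mode_create_dumb args = {
                .width = 1920,
                .height = 1080,
                .bpp = 32,
        };

        /* handle, pitch and size come back filled in by the kernel */
        ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args);
        printf("handle %u pitch %u size %llu\n",
               args.handle, args.pitch, (unsigned long long)args.size);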
 
@@ -4784,7 +4839,7 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv)
@@ -4811,7 +4866,7 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
  * Called by the user via ioctl.
  *
  * Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
  */
 int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file_priv)
index 6c65a0a28fbde3e0efbee9ccd9541cccef7aeaa6..d552708409ded9cb3d8a81891d4824da4711f162 100644 (file)
 #include <linux/moduleparam.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 
+/**
+ * DOC: overview
+ *
+ * The CRTC modeset helper library provides a default set_config implementation
+ * in drm_crtc_helper_set_config(), plus a few other convenience functions built
+ * on the same callbacks which drivers can use to e.g. restore the modeset
+ * configuration on resume with drm_helper_resume_force_mode().
+ *
+ * The driver callbacks are mostly compatible with the atomic modeset helpers,
+ * except for the handling of the primary plane: Atomic helpers require that the
+ * primary plane is implemented as a real standalone plane and not directly tied
+ * to the CRTC state. For easier transition this library provides functions to
+ * implement the old semantics required by the CRTC helpers using the new plane
+ * and atomic helper callbacks.
+ *
+ * Drivers are strongly urged to convert to the atomic helpers (by way of first
+ * converting to the plane helpers). New drivers must not use these functions
+ * but need to implement the atomic interface instead, potentially using the
+ * atomic helpers for that.
+ */
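
To make the division of labour concrete, here is a hedged sketch of how a driver typically wires this library up; the foo_* callbacks are hypothetical driver code:

        static const struct drm_crtc_funcs foo_crtc_funcs = {
                .set_config = drm_crtc_helper_set_config, /* from this library */
                .destroy = drm_crtc_cleanup,
        };

        static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
                .mode_fixup = foo_crtc_mode_fixup,
                .mode_set = foo_crtc_mode_set,
                .prepare = foo_crtc_prepare,
                .commit = foo_crtc_commit,
        };

        drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);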
 MODULE_AUTHOR("David Airlie, Jesse Barnes");
 MODULE_DESCRIPTION("DRM KMS helper");
 MODULE_LICENSE("GPL and additional rights");
@@ -888,3 +911,112 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
        drm_modeset_unlock_all(dev);
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+/**
+ * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @mode: DRM display mode which userspace requested
+ * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set callback
+ * required by the crtc helpers. Besides the atomic plane helper functions for
+ * the primary plane, the driver must also provide the ->mode_set_nofb callback
+ * to set up the crtc.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode, int x, int y,
+                            struct drm_framebuffer *old_fb)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       int ret;
+
+       if (crtc->funcs->atomic_duplicate_state)
+               crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+       else if (crtc->state)
+               crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
+                                    GFP_KERNEL);
+       else
+               crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state)
+               return -ENOMEM;
+
+       crtc_state->enable = true;
+       crtc_state->planes_changed = true;
+       crtc_state->mode_changed = true;
+       drm_mode_copy(&crtc_state->mode, mode);
+       drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
+
+       if (crtc_funcs->atomic_check) {
+               ret = crtc_funcs->atomic_check(crtc, crtc_state);
+               if (ret) {
+                       kfree(crtc_state);
+
+                       return ret;
+               }
+       }
+
+       swap(crtc->state, crtc_state);
+
+       crtc_funcs->mode_set_nofb(crtc);
+
+       if (crtc_state) {
+               if (crtc->funcs->atomic_destroy_state)
+                       crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+               else
+                       kfree(crtc_state);
+       }
+
+       return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set);
+
+/**
+ * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set_base callback
+ * required by the crtc helpers. The driver must provide the atomic plane helper
+ * functions for the primary plane.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       struct drm_plane_state *plane_state;
+       struct drm_plane *plane = crtc->primary;
+
+       if (plane->funcs->atomic_duplicate_state)
+               plane_state = plane->funcs->atomic_duplicate_state(plane);
+       else if (plane->state)
+               plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+       else
+               plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       if (!plane_state)
+               return -ENOMEM;
+
+       plane_state->crtc = crtc;
+       drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
+       plane_state->crtc_x = 0;
+       plane_state->crtc_y = 0;
+       plane_state->crtc_h = crtc->mode.vdisplay;
+       plane_state->crtc_w = crtc->mode.hdisplay;
+       plane_state->src_x = x << 16;
+       plane_state->src_y = y << 16;
+       plane_state->src_h = crtc->mode.vdisplay << 16;
+       plane_state->src_w = crtc->mode.hdisplay << 16;
+
+       return drm_plane_helper_commit(plane, plane_state, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
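
Taken together, the two transitional helpers let a driver that already implements the plane and atomic callbacks keep servicing the legacy CRTC helper entry points. A sketch of the intended wiring (foo_* names are hypothetical):

        static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
                /* transitional helpers from above */
                .mode_set = drm_helper_crtc_mode_set,
                .mode_set_base = drm_helper_crtc_mode_set_base,
                /* driver programs the mode without touching the primary plane */
                .mode_set_nofb = foo_crtc_mode_set_nofb,
                .atomic_check = foo_crtc_atomic_check,
        };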
index 08e33b8b13a433be43b0a39f462791388a856b95..959e2074b0d4299c10d45311fbce06c13ab58d0c 100644 (file)
  * blocks, ...
  */
 
-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-static int
-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
-                           uint8_t write_byte, uint8_t *read_byte)
-{
-       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-       int ret;
-
-       ret = (*algo_data->aux_ch)(adapter, mode,
-                                  write_byte, read_byte);
-       return ret;
-}
-
-/*
- * I2C over AUX CH
- */
-
-/*
- * Send the address. If the I2C link is running, this 'restarts'
- * the connection with the new address, this is used for doing
- * a write followed by a read (as needed for DDC)
- */
-static int
-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
-{
-       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-       int mode = MODE_I2C_START;
-       int ret;
-
-       if (reading)
-               mode |= MODE_I2C_READ;
-       else
-               mode |= MODE_I2C_WRITE;
-       algo_data->address = address;
-       algo_data->running = true;
-       ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
-       return ret;
-}
-
-/*
- * Stop the I2C transaction. This closes out the link, sending
- * a bare address packet with the MOT bit turned off
- */
-static void
-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
-{
-       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-       int mode = MODE_I2C_STOP;
-
-       if (reading)
-               mode |= MODE_I2C_READ;
-       else
-               mode |= MODE_I2C_WRITE;
-       if (algo_data->running) {
-               (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
-               algo_data->running = false;
-       }
-}
-
-/*
- * Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
-{
-       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-       int ret;
-
-       if (!algo_data->running)
-               return -EIO;
-
-       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
-       return ret;
-}
-
-/*
- * Read a single byte from the current I2C address, the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
-{
-       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-       int ret;
-
-       if (!algo_data->running)
-               return -EIO;
-
-       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
-       return ret;
-}
-
-static int
-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
-                    struct i2c_msg *msgs,
-                    int num)
-{
-       int ret = 0;
-       bool reading = false;
-       int m;
-       int b;
-
-       for (m = 0; m < num; m++) {
-               u16 len = msgs[m].len;
-               u8 *buf = msgs[m].buf;
-               reading = (msgs[m].flags & I2C_M_RD) != 0;
-               ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
-               if (ret < 0)
-                       break;
-               if (reading) {
-                       for (b = 0; b < len; b++) {
-                               ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
-                               if (ret < 0)
-                                       break;
-                       }
-               } else {
-                       for (b = 0; b < len; b++) {
-                               ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
-                               if (ret < 0)
-                                       break;
-                       }
-               }
-               if (ret < 0)
-                       break;
-       }
-       if (ret >= 0)
-               ret = num;
-       i2c_algo_dp_aux_stop(adapter, reading);
-       DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
-       return ret;
-}
-
-static u32
-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
-{
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
-              I2C_FUNC_SMBUS_READ_BLOCK_DATA |
-              I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
-              I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm i2c_dp_aux_algo = {
-       .master_xfer    = i2c_algo_dp_aux_xfer,
-       .functionality  = i2c_algo_dp_aux_functionality,
-};
-
-static void
-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
-{
-       (void) i2c_algo_dp_aux_address(adapter, 0, false);
-       (void) i2c_algo_dp_aux_stop(adapter, false);
-}
-
-static int
-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
-{
-       adapter->algo = &i2c_dp_aux_algo;
-       adapter->retries = 3;
-       i2c_dp_aux_reset_bus(adapter);
-       return 0;
-}
-
-/**
- * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
- * @adapter: i2c adapter to register
- *
- * This registers an i2c adapter that uses dp aux channel as it's underlaying
- * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
- * and store it in the algo_data member of the @adapter argument. This will be
- * used by the i2c over dp aux algorithm to drive the hardware.
- *
- * RETURNS:
- * 0 on success, -ERRNO on failure.
- *
- * IMPORTANT:
- * This interface is deprecated, please switch to the new dp aux helpers and
- * drm_dp_aux_register().
- */
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
-{
-       int error;
-
-       error = i2c_dp_aux_prepare_bus(adapter);
-       if (error)
-               return error;
-       error = i2c_add_adapter(adapter);
-       return error;
-}
-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
-
 /* Helpers for DP link training */
 static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
@@ -654,10 +462,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 
                case DP_AUX_I2C_REPLY_NACK:
                        DRM_DEBUG_KMS("I2C nack\n");
+                       aux->i2c_nack_count++;
                        return -EREMOTEIO;
 
                case DP_AUX_I2C_REPLY_DEFER:
                        DRM_DEBUG_KMS("I2C defer\n");
+                       aux->i2c_defer_count++;
                        usleep_range(400, 500);
                        continue;
 
index 070f913d2dba40eede7075f6e4b4b652e18dbdfa..5682d7e9f1ec28a825263097d7009fdbf5b5cdd5 100644 (file)
@@ -1011,19 +1011,20 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
 
 static void build_mst_prop_path(struct drm_dp_mst_port *port,
                                struct drm_dp_mst_branch *mstb,
-                               char *proppath)
+                               char *proppath,
+                               size_t proppath_size)
 {
        int i;
        char temp[8];
-       snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+       snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
        for (i = 0; i < (mstb->lct - 1); i++) {
                int shift = (i % 2) ? 0 : 4;
                int port_num = mstb->rad[i / 2] >> shift;
-               snprintf(temp, 8, "-%d", port_num);
-               strncat(proppath, temp, 255);
+               snprintf(temp, sizeof(temp), "-%d", port_num);
+               strlcat(proppath, temp, proppath_size);
        }
-       snprintf(temp, 8, "-%d", port->port_num);
-       strncat(proppath, temp, 255);
+       snprintf(temp, sizeof(temp), "-%d", port->port_num);
+       strlcat(proppath, temp, proppath_size);
 }
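
The switch from strncat() to strlcat() is the real fix in this hunk: strncat()'s size argument only bounds how many source bytes are appended, not the destination size, so strncat(proppath, temp, 255) could still overflow a 255-byte proppath. strlcat() caps the total destination length instead. A contrast sketch:

        char dst[16] = "0123456789";

        /* WRONG: may append up to 15 bytes plus NUL, overflowing dst[16] */
        strncat(dst, "abcdefghijklmno", 15);

        /* OK: never writes past dst[15] and keeps dst NUL-terminated */
        strlcat(dst, "abcdefghijklmno", sizeof(dst));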
 
 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
@@ -1094,7 +1095,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
        if (created && !port->input) {
                char proppath[255];
-               build_mst_prop_path(port, mstb, proppath);
+               build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
                port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
        }
 
@@ -1798,17 +1799,27 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
-static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
+                                    int dp_link_count,
+                                    int *out)
 {
        switch (dp_link_bw) {
+       default:
+               DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
+                             dp_link_bw, dp_link_count);
+               return false;
+
        case DP_LINK_BW_1_62:
-               return 3 * dp_link_count;
+               *out = 3 * dp_link_count;
+               break;
        case DP_LINK_BW_2_7:
-               return 5 * dp_link_count;
+               *out = 5 * dp_link_count;
+               break;
        case DP_LINK_BW_5_4:
-               return 10 * dp_link_count;
+               *out = 10 * dp_link_count;
+               break;
        }
-       BUG();
+       return true;
 }
 
 /**
@@ -1840,7 +1851,13 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                        goto out_unlock;
                }
 
-               mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+               if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+                                             mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
+                                             &mgr->pbn_div)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
                mgr->total_pbn = 2560;
                mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
                mgr->avail_slots = mgr->total_slots;
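
A quick sanity check of the numbers this hunk feeds in, for DP_LINK_BW_5_4 on 4 lanes:

        pbn_div     = 10 * 4                 = 40
        total_slots = DIV_ROUND_UP(2560, 40) = 64

and an undefined link-bandwidth byte in the DPCD now propagates -EINVAL to the caller instead of triggering BUG().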
index bc3da32d458590a849a785aeca890670741e5212..4f41377b0b80963f9ce84b3492412d025924652b 100644 (file)
@@ -56,7 +56,7 @@ static struct idr drm_minors_idr;
 struct class *drm_class;
 static struct dentry *drm_debugfs_root;
 
-void drm_err(const char *func, const char *format, ...)
+void drm_err(const char *format, ...)
 {
        struct va_format vaf;
        va_list args;
@@ -66,7 +66,8 @@ void drm_err(const char *func, const char *format, ...)
        vaf.fmt = format;
        vaf.va = &args;
 
-       printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+       printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+              __builtin_return_address(0), &vaf);
 
        va_end(args);
 }
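
DRM_ERROR() call sites no longer pass __func__; drm_err() recovers the caller from __builtin_return_address(0) and prints it via %pf. An illustration of the effect (the function name in the log line is made up):

        DRM_ERROR("failed to set mode: %d\n", ret);
        /* now logs something like: [drm:foo_crtc_mode_set] *ERROR* failed to set mode: -22 */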
@@ -534,6 +535,8 @@ static void drm_fs_inode_free(struct inode *inode)
  * The initial ref-count of the object is 1. Use drm_dev_ref() and
  * drm_dev_unref() to take and drop further ref-counts.
  *
+ * Note that for purely virtual devices @parent can be NULL.
+ *
  * RETURNS:
  * Pointer to new DRM device, or NULL if out of memory.
  */
index 3bf999134bcc503dd12e9c8dce3244a81e447a24..a7b5a71856a72a3956cc4f3c6fbe90442e1a535f 100644 (file)
@@ -1125,9 +1125,9 @@ EXPORT_SYMBOL(drm_edid_is_valid);
  * Return: 0 on success or -1 on failure.
  */
 static int
-drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
-                     int block, int len)
+drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
 {
+       struct i2c_adapter *adapter = data;
        unsigned char start = block * EDID_LENGTH;
        unsigned char segment = block >> 1;
        unsigned char xfers = segment ? 3 : 2;
@@ -1184,8 +1184,26 @@ static bool drm_edid_is_zero(u8 *in_edid, int length)
        return true;
 }
 
-static u8 *
-drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+/**
+ * drm_do_get_edid - get EDID data using a custom EDID block read function
+ * @connector: connector we're probing
+ * @get_edid_block: EDID block read function
+ * @data: private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * Since in the general case the DDC bus is accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_get_edid() instead of abusing this function.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+       int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+                             size_t len),
+       void *data)
 {
        int i, j = 0, valid_extensions = 0;
        u8 *block, *new;
@@ -1196,7 +1214,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        /* base block fetch */
        for (i = 0; i < 4; i++) {
-               if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+               if (get_edid_block(data, block, 0, EDID_LENGTH))
                        goto out;
                if (drm_edid_block_valid(block, 0, print_bad_edid))
                        break;
@@ -1210,7 +1228,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        /* if there's no extensions, we're done */
        if (block[0x7e] == 0)
-               return block;
+               return (struct edid *)block;
 
        new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
        if (!new)
@@ -1219,7 +1237,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        for (j = 1; j <= block[0x7e]; j++) {
                for (i = 0; i < 4; i++) {
-                       if (drm_do_probe_ddc_edid(adapter,
+                       if (get_edid_block(data,
                                  block + (valid_extensions + 1) * EDID_LENGTH,
                                  j, EDID_LENGTH))
                                goto out;
@@ -1247,7 +1265,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
                block = new;
        }
 
-       return block;
+       return (struct edid *)block;
 
 carp:
        if (print_bad_edid) {
@@ -1260,6 +1278,7 @@ out:
        kfree(block);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(drm_do_get_edid);
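
A hedged sketch of the intended use of drm_do_get_edid(), with foo_* standing in for a hypothetical driver whose EDID sits behind a firmware mailbox rather than a plain I2C adapter:

        static int foo_read_edid_block(void *data, u8 *buf, unsigned int block,
                                       size_t len)
        {
                struct foo_encoder *foo = data;

                /* fetch 'len' bytes of EDID block 'block' via the device mailbox */
                return foo_mailbox_read(foo, block * EDID_LENGTH, buf, len);
        }

        edid = drm_do_get_edid(connector, foo_read_edid_block, foo);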
 
 /**
  * drm_probe_ddc() - probe DDC presence
@@ -1289,12 +1308,10 @@ EXPORT_SYMBOL(drm_probe_ddc);
 struct edid *drm_get_edid(struct drm_connector *connector,
                          struct i2c_adapter *adapter)
 {
-       struct edid *edid = NULL;
-
-       if (drm_probe_ddc(adapter))
-               edid = (struct edid *)drm_do_get_edid(connector, adapter);
+       if (!drm_probe_ddc(adapter))
+               return NULL;
 
-       return edid;
+       return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -3128,9 +3145,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
                }
        }
        eld[5] |= sad_count << 4;
-       eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
-       DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+       eld[DRM_ELD_BASELINE_ELD_LEN] =
+               DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
+
+       DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
+                     drm_eld_size(eld), sad_count);
 }
 EXPORT_SYMBOL(drm_edid_to_eld);
 
index 0a235fe61c9b9bfd929a1f4a71461bc6ad2bd3d7..732cb6f8e653f58dee7102f0bb11b797cde2f5ad 100644 (file)
@@ -254,8 +254,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
            name, connector_name);
 
 out:
-       if (fw)
-               release_firmware(fw);
+       release_firmware(fw);
        return edid;
 }
 
index 0c0c39bac23da21a368ef94681da10b173994660..09d47e9ba02612642a19987f9cea6dc39b795f82 100644 (file)
@@ -1570,7 +1570,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
                modeset = &fb_helper->crtc_info[i].mode_set;
                if (modeset->num_connectors == 0) {
                        BUG_ON(modeset->fb);
-                       BUG_ON(modeset->num_connectors);
                        if (modeset->mode)
                                drm_mode_destroy(dev, modeset->mode);
                        modeset->mode = NULL;
index f9c7fa3d00124e9d4cd126cccd3a8f78ee0d7836..43d9b950ef9fd03bb0f7eb267a0c46a01e6688c7 100644 (file)
 #include "drmP.h"
 #include "drm_flip_work.h"
 
+/**
+ * drm_flip_work_allocate_task - allocate a flip-work task
+ * @data: data associated with the task
+ * @flags: allocator flags
+ *
+ * Allocate a drm_flip_task object and attach private data to it.
+ */
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+{
+       struct drm_flip_task *task;
+
+       task = kzalloc(sizeof(*task), flags);
+       if (task)
+               task->data = data;
+
+       return task;
+}
+EXPORT_SYMBOL(drm_flip_work_allocate_task);
+
+/**
+ * drm_flip_work_queue_task - queue a specific task
+ * @work: the flip-work
+ * @task: the task to handle
+ *
+ * Queues a task that will later be run (passed back to the drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+                             struct drm_flip_task *task)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&work->lock, flags);
+       list_add_tail(&task->node, &work->queued);
+       spin_unlock_irqrestore(&work->lock, flags);
+}
+EXPORT_SYMBOL(drm_flip_work_queue_task);
+
 /**
  * drm_flip_work_queue - queue work
  * @work: the flip-work
  */
 void drm_flip_work_queue(struct drm_flip_work *work, void *val)
 {
-       if (kfifo_put(&work->fifo, val)) {
-               atomic_inc(&work->pending);
+       struct drm_flip_task *task;
+
+       task = drm_flip_work_allocate_task(val,
+                               drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
+       if (task) {
+               drm_flip_work_queue_task(work, task);
        } else {
-               DRM_ERROR("%s fifo full!\n", work->name);
+               DRM_ERROR("%s could not allocate task!\n", work->name);
                work->func(work, val);
        }
 }
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
 void drm_flip_work_commit(struct drm_flip_work *work,
                struct workqueue_struct *wq)
 {
-       uint32_t pending = atomic_read(&work->pending);
-       atomic_add(pending, &work->count);
-       atomic_sub(pending, &work->pending);
+       unsigned long flags;
+
+       spin_lock_irqsave(&work->lock, flags);
+       list_splice_tail(&work->queued, &work->commited);
+       INIT_LIST_HEAD(&work->queued);
+       spin_unlock_irqrestore(&work->lock, flags);
        queue_work(wq, &work->worker);
 }
 EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,47 +111,46 @@ EXPORT_SYMBOL(drm_flip_work_commit);
 static void flip_worker(struct work_struct *w)
 {
        struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
-       uint32_t count = atomic_read(&work->count);
-       void *val = NULL;
+       struct list_head tasks;
+       unsigned long flags;
+
+       while (1) {
+               struct drm_flip_task *task, *tmp;
+
+               INIT_LIST_HEAD(&tasks);
+               spin_lock_irqsave(&work->lock, flags);
+               list_splice_tail(&work->commited, &tasks);
+               INIT_LIST_HEAD(&work->commited);
+               spin_unlock_irqrestore(&work->lock, flags);
 
-       atomic_sub(count, &work->count);
+               if (list_empty(&tasks))
+                       break;
 
-       while(count--)
-               if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
-                       work->func(work, val);
+               list_for_each_entry_safe(task, tmp, &tasks, node) {
+                       work->func(work, task->data);
+                       kfree(task);
+               }
+       }
 }
 
 /**
  * drm_flip_work_init - initialize flip-work
  * @work: the flip-work to initialize
- * @size: the max queue depth
  * @name: debug name
  * @func: the callback work function
  *
  * Initializes/allocates resources for the flip-work
- *
- * RETURNS:
- * Zero on success, error code on failure.
  */
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
                const char *name, drm_flip_func_t func)
 {
-       int ret;
-
        work->name = name;
-       atomic_set(&work->count, 0);
-       atomic_set(&work->pending, 0);
+       INIT_LIST_HEAD(&work->queued);
+       INIT_LIST_HEAD(&work->commited);
+       spin_lock_init(&work->lock);
        work->func = func;
 
-       ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
-       if (ret) {
-               DRM_ERROR("could not allocate %s fifo\n", name);
-               return ret;
-       }
-
        INIT_WORK(&work->worker, flip_worker);
-
-       return 0;
 }
 EXPORT_SYMBOL(drm_flip_work_init);
 
@@ -118,7 +162,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
  */
 void drm_flip_work_cleanup(struct drm_flip_work *work)
 {
-       WARN_ON(!kfifo_is_empty(&work->fifo));
-       kfifo_free(&work->fifo);
+       WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
 }
 EXPORT_SYMBOL(drm_flip_work_cleanup);
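
The list-based rework keeps the driver-facing pattern the same while removing the fixed queue depth; note that drm_flip_work_init() can no longer fail. A usage sketch with hypothetical foo_* names, typical for deferring framebuffer unreferences out of the page-flip IRQ path:

        static void foo_unref_worker(struct drm_flip_work *work, void *val)
        {
                drm_framebuffer_unreference(val);
        }

        drm_flip_work_init(&foo->unref_work, "unref", foo_unref_worker);

        /* later, e.g. from the page-flip interrupt handler */
        drm_flip_work_queue(&foo->unref_work, old_fb);
        drm_flip_work_commit(&foo->unref_work, foo->wq);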
index ed7bc68f7e8700041894c7f35685af449c4deb2e..91e1105f28006eae6a4e1e1a654a20c1315cff80 100644 (file)
@@ -515,10 +515,12 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
        size_t total;
        ssize_t ret;
 
-       ret = wait_event_interruptible(file_priv->event_wait,
-                                      !list_empty(&file_priv->event_list));
-       if (ret < 0)
-               return ret;
+       if ((filp->f_flags & O_NONBLOCK) == 0) {
+               ret = wait_event_interruptible(file_priv->event_wait,
+                                              !list_empty(&file_priv->event_list));
+               if (ret < 0)
+                       return ret;
+       }
 
        total = 0;
        while (drm_dequeue_event(file_priv, total, count, &e)) {
@@ -532,7 +534,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
                e->destroy(e);
        }
 
-       return total;
+       return total ?: -EAGAIN;
 }
 EXPORT_SYMBOL(drm_read);
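
With this change a DRM file opened with O_NONBLOCK behaves like other event sources: read() fails with EAGAIN when no event is queued instead of sleeping. A userspace sketch of the poll-driven loop this enables:

        int fd = open("/dev/dri/card0", O_RDWR | O_NONBLOCK);
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        while (poll(&pfd, 1, -1) > 0) {
                char buf[1024];
                ssize_t n = read(fd, buf, sizeof(buf));

                if (n < 0 && errno == EAGAIN)
                        continue;       /* raced with another reader */
                /* parse the struct drm_event records in buf[0..n) */
        }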
 
index f6ca51259fa3a90dbc46fff70d8e15dc5e483ba8..16a1647707136cb7207a83967af7235367e1ee73 100644 (file)
@@ -188,7 +188,7 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 }
 
 /**
- * drm_gem_object_free - release resources bound to userspace handles
+ * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
  *
  * Called after the last handle to the object has been closed
@@ -309,7 +309,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
  * drm_gem_handle_create_tail - internal functions to create a handle
  * @file_priv: drm file-private structure to register the handle for
  * @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
  * 
  * This expects the dev->object_name_lock to be held already and will drop it
  * before returning. Used to avoid races in establishing new handles when
@@ -362,7 +362,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 }
 
 /**
- * gem_handle_create - create a gem handle for an object
+ * drm_gem_handle_create - create a gem handle for an object
  * @file_priv: drm file-private structure to register the handle for
  * @obj: object to register
  * @handlep: pointer to return the created handle to the caller
@@ -371,10 +371,9 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
  */
-int
-drm_gem_handle_create(struct drm_file *file_priv,
-                      struct drm_gem_object *obj,
-                      u32 *handlep)
+int drm_gem_handle_create(struct drm_file *file_priv,
+                         struct drm_gem_object *obj,
+                         u32 *handlep)
 {
        mutex_lock(&obj->dev->object_name_lock);
 
index 0316310e2cc47400703418ea324e381cc65f8c8b..e419eedf751de6cb4c3f0b64b88dcd771cf594bc 100644 (file)
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_vma_manager.h>
 
-/*
+/**
+ * DOC: cma helpers
+ *
+ * The Contiguous Memory Allocator reserves a pool of memory at early boot
+ * that is used to service requests for large blocks of contiguous memory.
+ *
+ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
+ * objects that are physically contiguous in memory. This is useful for
+ * display drivers that are unable to map scattered buffers via an IOMMU.
+ */
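
These helpers are designed to be plugged straight into a driver's drm_driver and file_operations; a hedged sketch of the usual wiring for a hypothetical "foo" driver:

        static const struct file_operations foo_fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
                .release = drm_release,
                .unlocked_ioctl = drm_ioctl,
                .mmap = drm_gem_cma_mmap,
        };

        static struct drm_driver foo_driver = {
                .gem_free_object = drm_gem_cma_free_object,
                .gem_vm_ops = &drm_gem_cma_vm_ops,
                .dumb_create = drm_gem_cma_dumb_create,
                .dumb_map_offset = drm_gem_cma_dumb_map_offset,
                .dumb_destroy = drm_gem_dumb_destroy,
                .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
                .gem_prime_vmap = drm_gem_cma_prime_vmap,
                .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
                .gem_prime_mmap = drm_gem_cma_prime_mmap,
                .fops = &foo_fops,
        };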
+
+/**
  * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
- * @drm: The drm device
- * @size: The GEM object size
+ * @drm: DRM device
+ * @size: size of the object to allocate
  *
- * This function creates and initializes a GEM CMA object of the given size, but
- * doesn't allocate any memory to back the object.
+ * This function creates and initializes a GEM CMA object of the given size,
+ * but doesn't allocate any memory to back the object.
  *
- * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
  */
 static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
+__drm_gem_cma_create(struct drm_device *drm, size_t size)
 {
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
@@ -69,14 +82,21 @@ error:
        return ERR_PTR(ret);
 }
 
-/*
+/**
  * drm_gem_cma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a CMA GEM object and allocates a contiguous chunk of
+ * memory as backing store. The backing memory has the writecombine attribute
+ * set.
  *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
  */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-               unsigned int size)
+                                             size_t size)
 {
        struct drm_gem_cma_object *cma_obj;
        int ret;
@@ -104,17 +124,26 @@ error:
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
 
-/*
- * drm_gem_cma_create_with_handle - allocate an object with the given
- * size and create a gem handle on it
+/**
+ * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ *     return a GEM handle to it
+ * @file_priv: DRM file-private structure to register the handle for
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @handle: return location for the GEM handle
+ *
+ * This function creates a CMA GEM object, allocating a physically contiguous
+ * chunk of memory as backing store. The GEM object is then added to the list
+ * of objects associated with the given file and a handle to it is returned.
  *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
  */
-static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
-               struct drm_file *file_priv,
-               struct drm_device *drm, unsigned int size,
-               unsigned int *handle)
+static struct drm_gem_cma_object *
+drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+                              struct drm_device *drm, size_t size,
+                              uint32_t *handle)
 {
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
@@ -145,16 +174,19 @@ err_handle_create:
        return ERR_PTR(ret);
 }
 
-/*
- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
- * function
+/**
+ * drm_gem_cma_free_object - free resources associated with a CMA GEM object
+ * @gem_obj: GEM object to free
+ *
+ * This function frees the backing memory of the CMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself.
+ * Drivers using the CMA helpers should set this as their DRM driver's
+ * ->gem_free_object() callback.
  */
 void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_gem_cma_object *cma_obj;
 
-       drm_gem_free_mmap_offset(gem_obj);
-
        cma_obj = to_drm_gem_cma_obj(gem_obj);
 
        if (cma_obj->vaddr) {
@@ -170,18 +202,26 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
 
-/*
- * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
- * function
+/**
+ * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This aligns the pitch and size arguments to the minimum required. This is
+ * an internal helper that can be wrapped by a driver to account for hardware
+ * with more specific alignment requirements. It should not be used directly
+ * as the ->dumb_create() callback in a DRM driver.
  *
- * This aligns the pitch and size arguments to the minimum required. wrap
- * this into your own function if you need bigger alignment.
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
-               struct drm_device *dev, struct drm_mode_create_dumb *args)
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+                                    struct drm_device *drm,
+                                    struct drm_mode_create_dumb *args)
 {
+       unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_cma_object *cma_obj;
-       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 
        if (args->pitch < min_pitch)
                args->pitch = min_pitch;
@@ -189,18 +229,63 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;
 
-       cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
-                       args->size, &args->handle);
+       cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+                                                &args->handle);
+       return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+
+/**
+ * drm_gem_cma_dumb_create - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their ->dumb_create() callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace and pass the IOCTL data along to the
+ * drm_gem_cma_dumb_create_internal() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+                           struct drm_device *drm,
+                           struct drm_mode_create_dumb *args)
+{
+       struct drm_gem_cma_object *cma_obj;
+
+       args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       args->size = args->pitch * args->height;
+
+       cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+                                                &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
 
-/*
- * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
- * function
+/**
+ * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
+ *     object
+ * @file_priv: DRM file-private structure containing the GEM object
+ * @drm: DRM device
+ * @handle: GEM object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This function looks up an object by its handle and returns the fake mmap
+ * offset associated with it. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->dumb_map_offset() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *drm, uint32_t handle, uint64_t *offset)
+                               struct drm_device *drm, u32 handle,
+                               u64 *offset)
 {
        struct drm_gem_object *gem_obj;
 
@@ -208,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 
        gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
        if (!gem_obj) {
-               dev_err(drm->dev, "failed to lookup gem object\n");
+               dev_err(drm->dev, "failed to lookup GEM object\n");
                mutex_unlock(&drm->struct_mutex);
                return -EINVAL;
        }
@@ -251,8 +336,20 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
        return ret;
 }
 
-/*
- * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+/**
+ * drm_gem_cma_mmap - memory-map a CMA GEM object
+ * @filp: file object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for CMA objects: In addition to the usual GEM VMA setup it
+ * immediately faults in the entire object instead of using on-demand
+ * faulting. Drivers which employ the CMA helpers should use this function
+ * as their ->mmap() handler in the DRM device file's file_operations
+ * structure.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -272,7 +369,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
 #ifdef CONFIG_DEBUG_FS
-void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+/**
+ * drm_gem_cma_describe - describe a CMA GEM object for debugfs
+ * @cma_obj: CMA GEM object
+ * @m: debugfs file handle
+ *
+ * This function can be used to dump a human-readable representation of the
+ * CMA GEM object into a synthetic file.
+ */
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
+                         struct seq_file *m)
 {
        struct drm_gem_object *obj = &cma_obj->base;
        struct drm_device *dev = obj->dev;
@@ -291,7 +397,18 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
 
-/* low-level interface prime helpers */
+/**
+ * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ *     pages for a CMA GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API. Drivers using the CMA helpers should
+ * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -315,6 +432,23 @@ out:
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
 
+/**
+ * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ *     driver's scatter/gather table of pinned pages
+ * @dev: device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Imported buffers must be physically contiguous in memory
+ * (i.e. the scatter/gather table must contain a single entry). Drivers that
+ * use the CMA helpers should set this as their DRM driver's
+ * ->gem_prime_import_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
 struct drm_gem_object *
 drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
@@ -339,6 +473,18 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
 
+/**
+ * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer imported via DRM PRIME into a userspace
+ * process's address space. Drivers that use the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_mmap() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
 int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
 {
@@ -357,6 +503,20 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
 
+/**
+ * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ *     address space
+ * @obj: GEM object
+ *
+ * This function maps a buffer exported via DRM PRIME into the kernel's
+ * virtual address space. Since the CMA buffers are already mapped into the
+ * kernel virtual address space this simply returns the cached virtual
+ * address. Drivers using the CMA helpers should set this as their DRM
+ * driver's ->gem_prime_vmap() callback.
+ *
+ * Returns:
+ * The kernel virtual address of the CMA GEM object's backing store.
+ */
 void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
 {
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -365,6 +525,17 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
 
+/**
+ * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
+ *     address space
+ * @obj: GEM object
+ * @vaddr: kernel virtual address where the CMA GEM object was mapped
+ *
+ * This function removes a buffer exported via DRM PRIME from the kernel's
+ * virtual address space. This is a no-op because CMA buffers cannot be
+ * unmapped from kernel space. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_vunmap() callback.
+ */
 void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
        /* Nothing to do */
index 5ef03c216a2740818468ef64ebb1a056f1644235..0e47df4ef24efa2226de1f1fa8b4ea26c76e55cb 100644 (file)
@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
 
-       BUG_ON(atomic_read(&vblank->refcount) == 0);
+       if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+               return;
 
        if (WARN_ON(crtc >= dev->num_crtcs))
                return;
@@ -1190,7 +1191,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
  *
  * This functions restores the vblank interrupt state captured with
  * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
  * in driver load code to reflect the current hardware state of the crtc.
  *
  * This is the legacy version of drm_crtc_vblank_on().
@@ -1237,7 +1238,7 @@ EXPORT_SYMBOL(drm_vblank_on);
  *
  * This functions restores the vblank interrupt state captured with
  * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
  * in driver load code to reflect the current hardware state of the crtc.
  *
  * This is the native kms version of drm_vblank_on().
index eb6dfe52cab24e1aa36ef12ee951c7c2d7103d6f..c0644bb865f257f9321f30870bd814bc804b2326 100644 (file)
 
 #include <video/mipi_display.h>
 
+/**
+ * DOC: dsi helpers
+ *
+ * These functions contain some common logic and helpers to deal with MIPI DSI
+ * peripherals.
+ *
+ * Helpers are provided for a number of standard MIPI DSI commands as well as a
+ * subset of the MIPI DCS command set.
+ */
+
 static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
 {
        return of_driver_match_device(dev, drv);
@@ -57,6 +67,29 @@ static struct bus_type mipi_dsi_bus_type = {
        .pm = &mipi_dsi_device_pm_ops,
 };
 
+static int of_device_match(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+/**
+ * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
+ *    device tree node
+ * @np: device tree node
+ *
+ * Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
+ *    such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
+
+       return dev ? to_mipi_dsi_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
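
A hypothetical consumer could resolve a peripheral like so (editor's sketch; the "panel" phandle name and foo_ prefix are invented):

/* Editor's sketch: look up a DSI peripheral from a device tree phandle. */
static struct mipi_dsi_device *foo_get_dsi_peripheral(struct device *dev)
{
	struct device_node *np;
	struct mipi_dsi_device *dsi;

	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (!np)
		return NULL;

	dsi = of_find_mipi_dsi_device_by_node(np);
	of_node_put(np);

	/* NULL here simply means the device isn't registered yet */
	return dsi;
}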
+
 static void mipi_dsi_dev_release(struct device *dev)
 {
        struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
@@ -198,59 +231,351 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
 }
 EXPORT_SYMBOL(mipi_dsi_detach);
 
+static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
+                                       struct mipi_dsi_msg *msg)
+{
+       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+       if (!ops || !ops->transfer)
+               return -ENOSYS;
+
+       if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
+               msg->flags |= MIPI_DSI_MSG_USE_LPM;
+
+       return ops->transfer(dsi->host, msg);
+}
+
 /**
- * mipi_dsi_dcs_write - send DCS write command
- * @dsi: DSI device
- * @data: pointer to the command followed by parameters
- * @len: length of @data
+ * mipi_dsi_packet_format_is_short - check if a packet is of the short format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a short packet, false
+ * otherwise.
  */
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
-                           size_t len)
+bool mipi_dsi_packet_format_is_short(u8 type)
+{
+       switch (type) {
+       case MIPI_DSI_V_SYNC_START:
+       case MIPI_DSI_V_SYNC_END:
+       case MIPI_DSI_H_SYNC_START:
+       case MIPI_DSI_H_SYNC_END:
+       case MIPI_DSI_END_OF_TRANSMISSION:
+       case MIPI_DSI_COLOR_MODE_OFF:
+       case MIPI_DSI_COLOR_MODE_ON:
+       case MIPI_DSI_SHUTDOWN_PERIPHERAL:
+       case MIPI_DSI_TURN_ON_PERIPHERAL:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+       case MIPI_DSI_DCS_SHORT_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+       case MIPI_DSI_DCS_READ:
+       case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+               return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
+
+/**
+ * mipi_dsi_packet_format_is_long - check if a packet is of the long format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a long packet, false
+ * otherwise.
+ */
+bool mipi_dsi_packet_format_is_long(u8 type)
+{
+       switch (type) {
+       case MIPI_DSI_NULL_PACKET:
+       case MIPI_DSI_BLANKING_PACKET:
+       case MIPI_DSI_GENERIC_LONG_WRITE:
+       case MIPI_DSI_DCS_LONG_WRITE:
+       case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_30:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_36:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_16:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_18:
+       case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
+       case MIPI_DSI_PACKED_PIXEL_STREAM_24:
+               return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
+
+/**
+ * mipi_dsi_create_packet - create a packet from a message according to the
+ *     DSI protocol
+ * @packet: pointer to a DSI packet structure
+ * @msg: message to translate into a packet
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+                          const struct mipi_dsi_msg *msg)
+{
+       const u8 *tx;
+
+       if (!packet || !msg)
+               return -EINVAL;
+
+       tx = msg->tx_buf;
+
+       /* do some minimum sanity checking */
+       if (!mipi_dsi_packet_format_is_short(msg->type) &&
+           !mipi_dsi_packet_format_is_long(msg->type))
+               return -EINVAL;
+
+       if (msg->channel > 3)
+               return -EINVAL;
+
+       memset(packet, 0, sizeof(*packet));
+       packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+
+       /* TODO: compute ECC if hardware support is not available */
+
+       /*
+        * Long write packets contain the word count in header bytes 1 and 2.
+        * The payload follows the header and is word count bytes long.
+        *
+        * Short write packets encode up to two parameters in header bytes 1
+        * and 2.
+        */
+       if (mipi_dsi_packet_format_is_long(msg->type)) {
+               packet->header[1] = (msg->tx_len >> 0) & 0xff;
+               packet->header[2] = (msg->tx_len >> 8) & 0xff;
+
+               packet->payload_length = msg->tx_len;
+               packet->payload = tx;
+       } else {
+               packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
+               packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+       }
+
+       packet->size = sizeof(packet->header) + packet->payload_length;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_create_packet);
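
An editor's sketch of how a host driver might use this in its ->transfer() callback; foo_write_header() and foo_write_payload() stand in for hardware-specific FIFO accessors and are invented for illustration:

/* Editor's sketch: a host ->transfer() built on mipi_dsi_create_packet(). */
static ssize_t foo_dsi_transfer(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int err;

	err = mipi_dsi_create_packet(&packet, msg);
	if (err < 0)
		return err;

	foo_write_header(host, packet.header);

	if (packet.payload_length > 0)
		foo_write_payload(host, packet.payload, packet.payload_length);

	return packet.size;
}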
+
+/**
+ * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
+ *    the payload in a long packet transmitted from the peripheral back to the
+ *    host processor
+ * @dsi: DSI peripheral device
+ * @value: the maximum size of the payload
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+                                           u16 value)
+{
+       u8 tx[2] = { value & 0xff, value >> 8 };
+       struct mipi_dsi_msg msg = {
+               .channel = dsi->channel,
+               .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+               .tx_len = sizeof(tx),
+               .tx_buf = tx,
+       };
+
+       return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+
+/**
+ * mipi_dsi_generic_write() - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @payload: buffer containing the payload
+ * @size: size of payload buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the payload length.
+ *
+ * Return: The number of bytes transmitted on success or a negative error code
+ * on failure.
+ */
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+                              size_t size)
+{
+       struct mipi_dsi_msg msg = {
+               .channel = dsi->channel,
+               .tx_buf = payload,
+               .tx_len = size
+       };
+
+       switch (size) {
+       case 0:
+               msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
+               break;
+
+       case 1:
+               msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
+               break;
+
+       case 2:
+               msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
+               break;
+
+       default:
+               msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
+               break;
+       }
+
+       return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_write);
+
+/**
+ * mipi_dsi_generic_read() - receive data using a generic read packet
+ * @dsi: DSI peripheral device
+ * @params: buffer containing the request parameters
+ * @num_params: number of request parameters
+ * @data: buffer in which to return the received data
+ * @size: size of receive buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the number of parameters passed in.
+ *
+ * Return: The number of bytes successfully read or a negative error code on
+ * failure.
+ */
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+                             size_t num_params, void *data, size_t size)
+{
+       struct mipi_dsi_msg msg = {
+               .channel = dsi->channel,
+               .tx_len = num_params,
+               .tx_buf = params,
+               .rx_len = size,
+               .rx_buf = data
+       };
+
+       switch (num_params) {
+       case 0:
+               msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+               break;
+
+       case 1:
+               msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+               break;
+
+       case 2:
+               msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_read);
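
A minimal editor's sketch pairing a generic short write with a generic read; the register value 0xb0 and the foo_ naming are invented for illustration:

/* Editor's sketch: select a vendor register, then read one byte back. */
static int foo_read_vendor_register(struct mipi_dsi_device *dsi, u8 *out)
{
	u8 reg = 0xb0;
	ssize_t err;

	err = mipi_dsi_generic_write(dsi, &reg, sizeof(reg));
	if (err < 0)
		return err;

	err = mipi_dsi_generic_read(dsi, &reg, sizeof(reg), out, 1);
	if (err < 0)
		return err;

	return 0;
}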
+
+/**
+ * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
+ * @dsi: DSI peripheral device
+ * @data: buffer containing data to be transmitted
+ * @len: size of transmission buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
+ */
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+                                 const void *data, size_t len)
 {
-       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
        struct mipi_dsi_msg msg = {
                .channel = dsi->channel,
                .tx_buf = data,
                .tx_len = len
        };
 
-       if (!ops || !ops->transfer)
-               return -ENOSYS;
-
        switch (len) {
        case 0:
                return -EINVAL;
+
        case 1:
                msg.type = MIPI_DSI_DCS_SHORT_WRITE;
                break;
+
        case 2:
                msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
                break;
+
        default:
                msg.type = MIPI_DSI_DCS_LONG_WRITE;
                break;
        }
 
-       if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
-               msg.flags = MIPI_DSI_MSG_USE_LPM;
+       return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
 
-       return ops->transfer(dsi->host, &msg);
+/**
+ * mipi_dsi_dcs_write() - send DCS write command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer containing the command payload
+ * @len: command payload length
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
+ */
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+                          const void *data, size_t len)
+{
+       ssize_t err;
+       size_t size;
+       u8 *tx;
+
+       if (len > 0) {
+               size = 1 + len;
+
+               tx = kmalloc(size, GFP_KERNEL);
+               if (!tx)
+                       return -ENOMEM;
+
+               /* concatenate the DCS command byte and the payload */
+               tx[0] = cmd;
+               memcpy(&tx[1], data, len);
+       } else {
+               tx = &cmd;
+               size = 1;
+       }
+
+       err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
+
+       if (len > 0)
+               kfree(tx);
+
+       return err;
 }
 EXPORT_SYMBOL(mipi_dsi_dcs_write);
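
A short editor's sketch of the new calling convention, in which the DCS command byte is passed separately from its payload; MIPI_DCS_SET_ADDRESS_MODE comes from <video/mipi_display.h> and the mode value is an arbitrary example:

/* Editor's sketch: send a one-byte DCS parameter with the new API. */
static int foo_set_address_mode(struct mipi_dsi_device *dsi)
{
	u8 mode = 0x00;	/* example: default orientation */
	ssize_t err;

	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE,
				 &mode, sizeof(mode));
	return err < 0 ? err : 0;
}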
 
 /**
- * mipi_dsi_dcs_read - send DCS read request command
- * @dsi: DSI device
- * @cmd: DCS read command
- * @data: pointer to read buffer
- * @len: length of @data
+ * mipi_dsi_dcs_read() - send DCS read request command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
  *
- * Function returns number of read bytes or error code.
+ * Return: The number of bytes read or a negative error code on failure.
  */
 ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
                          size_t len)
 {
-       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
        struct mipi_dsi_msg msg = {
                .channel = dsi->channel,
                .type = MIPI_DSI_DCS_READ,
@@ -260,15 +585,282 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
                .rx_len = len
        };
 
-       if (!ops || !ops->transfer)
-               return -ENOSYS;
+       return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
 
-       if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
-               msg.flags = MIPI_DSI_MSG_USE_LPM;
+/**
+ * mipi_dsi_dcs_nop() - send DCS nop packet
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
+       if (err < 0)
+               return err;
 
-       return ops->transfer(dsi->host, &msg);
+       return 0;
 }
-EXPORT_SYMBOL(mipi_dsi_dcs_read);
+EXPORT_SYMBOL(mipi_dsi_dcs_nop);
+
+/**
+ * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
+
+/**
+ * mipi_dsi_dcs_get_power_mode() - query the display module's current power
+ *    mode
+ * @dsi: DSI peripheral device
+ * @mode: return location for the current power mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
+                               sizeof(*mode));
+       if (err <= 0) {
+               if (err == 0)
+                       err = -ENODATA;
+
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
+
+/**
+ * mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
+ *    data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: return location for the pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
+                               sizeof(*format));
+       if (err <= 0) {
+               if (err == 0)
+                       err = -ENODATA;
+
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
+
+/**
+ * mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
+ *    display module except interface communication
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
+ *    module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
+ *    display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
+
+/**
+ * mipi_dsi_dcs_set_display_on() - start displaying the image data on the
+ *    display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
+
+/**
+ * mipi_dsi_dcs_set_column_address() - define the column extent of the frame
+ *    memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first column of frame memory
+ * @end: last column of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+                                   u16 end)
+{
+       u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
+                                sizeof(payload));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
+
+/**
+ * mipi_dsi_dcs_set_page_address() - define the page extent of the frame
+ *    memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first page of frame memory
+ * @end: last page of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+                                 u16 end)
+{
+       u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
+                                sizeof(payload));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
+
+/**
+ * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
+ *    output signal on the TE signal line
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
+
+/**
+ * mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
+ *    output signal on the TE signal line.
+ * @dsi: DSI peripheral device
+ * @mode: the Tearing Effect Output Line mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+                            enum mipi_dsi_dcs_tear_mode mode)
+{
+       u8 value = mode;
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
+                                sizeof(value));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
+
+/**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ *    data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+       ssize_t err;
+
+       err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+                                sizeof(format));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
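
An editor's sketch of a panel enable path built from the helpers above; the 120ms post-sleep-out delay and the 0x77 (24bpp) format byte are typical values only and will differ per panel, and msleep() needs <linux/delay.h>:

/* Editor's sketch: wake a hypothetical panel and start scanout. */
static int foo_panel_enable(struct mipi_dsi_device *dsi)
{
	int err;

	err = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (err < 0)
		return err;

	msleep(120);

	err = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
	if (err < 0)
		return err;

	return mipi_dsi_dcs_set_display_on(dsi);
}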
 
 static int mipi_dsi_drv_probe(struct device *dev)
 {
@@ -295,12 +887,18 @@ static void mipi_dsi_drv_shutdown(struct device *dev)
 }
 
 /**
- * mipi_dsi_driver_register - register a driver for DSI devices
+ * mipi_dsi_driver_register_full() - register a driver for DSI devices
  * @drv: DSI driver structure
+ * @owner: owner module
+ *
+ * Return: 0 on success or a negative error code on failure.
  */
-int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
+                                 struct module *owner)
 {
        drv->driver.bus = &mipi_dsi_bus_type;
+       drv->driver.owner = owner;
+
        if (drv->probe)
                drv->driver.probe = mipi_dsi_drv_probe;
        if (drv->remove)
@@ -310,11 +908,13 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
 
        return driver_register(&drv->driver);
 }
-EXPORT_SYMBOL(mipi_dsi_driver_register);
+EXPORT_SYMBOL(mipi_dsi_driver_register_full);
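
Presumably existing callers keep the old entry point through a wrapper that captures the owner module at the call site; a sketch of such a macro (an assumption, not shown in this hunk):

/* Presumed wrapper (not shown in this hunk): preserves the old name
 * while recording THIS_MODULE automatically for the caller. */
#define mipi_dsi_driver_register(driver) \
	mipi_dsi_driver_register_full(driver, THIS_MODULE)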
 
 /**
- * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * mipi_dsi_driver_unregister() - unregister a driver for DSI devices
  * @drv: DSI driver structure
  */
 void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
 {
index d1b7d20065293338ccf88282369dda6a59c78c7f..6d8b941c8200414f1bf3b193cbdc395c27e8622d 100644 (file)
@@ -914,7 +914,7 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
  *
  * This function is a helper which can be used to validate modes against size
  * limitations of the DRM device/connector. If a mode is too big its status
- * memeber is updated with the appropriate validation failure code. The list
+ * member is updated with the appropriate validation failure code. The list
  * itself is not changed.
  */
 void drm_mode_validate_size(struct drm_device *dev,
index 474e4d12a2d8a73109a35fe3eb3b9948635fedaa..51cc47d827d82f09f1ed9d0e4200e299b4fa628c 100644 (file)
@@ -157,14 +157,20 @@ void drm_modeset_unlock_all(struct drm_device *dev)
 EXPORT_SYMBOL(drm_modeset_unlock_all);
 
 /**
- * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
- * @crtc: drm crtc
+ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
+ * @crtc: DRM CRTC
+ * @plane: DRM plane to be updated on @crtc
+ *
+ * This function locks the given crtc and plane (which should be either the
+ * primary or cursor plane) using a hidden acquire context. This is necessary so
+ * that drivers internally using the atomic interfaces can grab further locks
+ * with the lock acquire context.
  *
- * This function locks the given crtc using a hidden acquire context. This is
- * necessary so that drivers internally using the atomic interfaces can grab
- * further locks with the lock acquire context.
+ * Note that @plane can be NULL, e.g. when the cursor support hasn't been
+ * converted to universal planes yet.
  */
-void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+                          struct drm_plane *plane)
 {
        struct drm_modeset_acquire_ctx *ctx;
        int ret;
@@ -180,6 +186,18 @@ retry:
        if (ret)
                goto fail;
 
+       if (plane) {
+               ret = drm_modeset_lock(&plane->mutex, ctx);
+               if (ret)
+                       goto fail;
+
+               if (plane->crtc) {
+                       ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
+                       if (ret)
+                               goto fail;
+               }
+       }
+
        WARN_ON(crtc->acquire_ctx);
 
        /* now we hold the locks, so now that it is safe, stash the
@@ -437,15 +455,14 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
 }
 EXPORT_SYMBOL(drm_modeset_unlock);
 
-/* Temporary.. until we have sufficiently fine grained locking, there
- * are a couple scenarios where it is convenient to grab all crtc locks.
- * It is planned to remove this:
- */
+/* In some legacy codepaths it's convenient to just grab all the crtc- and
+ * plane-related locks. */
 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
                struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
        int ret = 0;
 
        list_for_each_entry(crtc, &config->crtc_list, head) {
@@ -454,6 +471,12 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
                        return ret;
        }
 
+       list_for_each_entry(plane, &config->plane_list, head) {
+               ret = drm_modeset_lock(&plane->mutex, ctx);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
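
An editor's sketch of the usual acquire-context pattern around drm_modeset_lock_all_crtcs(), including -EDEADLK backoff; the foo_ naming is invented:

/* Editor's sketch: take all crtc and plane locks with deadlock backoff. */
static int foo_lock_all(struct drm_device *dev,
			struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	drm_modeset_acquire_init(ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}
	if (ret) {
		drm_modeset_drop_locks(ctx);
		drm_modeset_acquire_fini(ctx);
	}

	/* on success the caller drops the locks and finishes the ctx */
	return ret;
}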
index 827ec1a3040b202fc025a9d1b8af54e32b67726f..18a1ac6ac22f396aa55b34187b7fb89a9392613b 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 
 #define SUBPIXEL_MASK 0xffff
 
+/**
+ * DOC: overview
+ *
+ * This helper library has two parts. The first part provides support for
+ * implementing primary plane support on top of the normal CRTC configuration
+ * interface. Since the legacy ->set_config interface ties the primary plane
+ * together with the CRTC state, this does not allow userspace to disable the
+ * primary plane itself. To avoid too much duplicated code, use
+ * drm_plane_helper_check_update(), which can be used to enforce the same
+ * restrictions that primary planes previously had. The default primary plane
+ * only exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
+ * framebuffer.
+ *
+ * Drivers are highly recommended to implement proper support for primary
+ * planes, and newly merged drivers must not rely upon these transitional
+ * helpers.
+ *
+ * The second part also implements transitional helpers which allow drivers to
+ * gradually switch to the atomic helper infrastructure for plane updates. Once
+ * that switch is complete, drivers shouldn't use these any longer; instead they
+ * should use the proper legacy implementations for the update and disable
+ * plane hooks provided by the atomic helpers.
+ *
+ * Again drivers are strongly urged to switch to the new interfaces.
+ */
+
 /*
  * This is the minimal list of formats that seem to be safe for modeset use
  * with all current DRM drivers.  Most hardware can actually support more
@@ -127,6 +155,11 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
                return -ERANGE;
        }
 
+       if (!fb) {
+               *visible = false;
+               return 0;
+       }
+
        *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
        if (!*visible)
                /*
@@ -369,3 +402,171 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
        return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
 }
 EXPORT_SYMBOL(drm_crtc_init);
+
+int drm_plane_helper_commit(struct drm_plane *plane,
+                           struct drm_plane_state *plane_state,
+                           struct drm_framebuffer *old_fb)
+{
+       struct drm_plane_helper_funcs *plane_funcs;
+       struct drm_crtc *crtc[2];
+       struct drm_crtc_helper_funcs *crtc_funcs[2];
+       int i, ret = 0;
+
+       plane_funcs = plane->helper_private;
+
+       /* Since this is a transitional helper we can't assume that plane->state
+        * is always valid. Hence we need to use plane->crtc instead of
+        * plane->state->crtc as the old crtc. */
+       crtc[0] = plane->crtc;
+       crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
+
+       for (i = 0; i < 2; i++)
+               crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
+
+       if (plane_funcs->atomic_check) {
+               ret = plane_funcs->atomic_check(plane, plane_state);
+               if (ret)
+                       goto out;
+       }
+
+       if (plane_funcs->prepare_fb && plane_state->fb) {
+               ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+               if (ret)
+                       goto out;
+       }
+
+       /* Point of no return, commit sw state. */
+       swap(plane->state, plane_state);
+
+       for (i = 0; i < 2; i++) {
+               if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
+                       crtc_funcs[i]->atomic_begin(crtc[i]);
+       }
+
+       plane_funcs->atomic_update(plane, plane_state);
+
+       for (i = 0; i < 2; i++) {
+               if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
+                       crtc_funcs[i]->atomic_flush(crtc[i]);
+       }
+
+       for (i = 0; i < 2; i++) {
+               if (!crtc[i])
+                       continue;
+
+               /* There's no other way to figure out whether the crtc is running. */
+               ret = drm_crtc_vblank_get(crtc[i]);
+               if (ret == 0) {
+                       drm_crtc_wait_one_vblank(crtc[i]);
+                       drm_crtc_vblank_put(crtc[i]);
+               }
+
+               ret = 0;
+       }
+
+       if (plane_funcs->cleanup_fb && old_fb)
+               plane_funcs->cleanup_fb(plane, old_fb);
+out:
+       if (plane_state) {
+               if (plane->funcs->atomic_destroy_state)
+                       plane->funcs->atomic_destroy_state(plane, plane_state);
+               else
+                       drm_atomic_helper_plane_destroy_state(plane, plane_state);
+       }
+
+       return ret;
+}
+
+/**
+ * drm_plane_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+                           struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h)
+{
+       struct drm_plane_state *plane_state;
+
+       if (plane->funcs->atomic_duplicate_state)
+               plane_state = plane->funcs->atomic_duplicate_state(plane);
+       else if (plane->state)
+               plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+       else
+               plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       if (!plane_state)
+               return -ENOMEM;
+
+       plane_state->crtc = crtc;
+       drm_atomic_set_fb_for_plane(plane_state, fb);
+       plane_state->crtc_x = crtc_x;
+       plane_state->crtc_y = crtc_y;
+       plane_state->crtc_h = crtc_h;
+       plane_state->crtc_w = crtc_w;
+       plane_state->src_x = src_x;
+       plane_state->src_y = src_y;
+       plane_state->src_h = src_h;
+       plane_state->src_w = src_w;
+
+       return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_update);
+
+/**
+ * drm_plane_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_disable(struct drm_plane *plane)
+{
+       struct drm_plane_state *plane_state;
+
+       /* crtc helpers love to call disable functions for already disabled hw
+        * functions. So cope with that. */
+       if (!plane->crtc)
+               return 0;
+
+       if (plane->funcs->atomic_duplicate_state)
+               plane_state = plane->funcs->atomic_duplicate_state(plane);
+       else if (plane->state)
+               plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+       else
+               plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       if (!plane_state)
+               return -ENOMEM;
+
+       plane_state->crtc = NULL;
+       drm_atomic_set_fb_for_plane(plane_state, NULL);
+
+       return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_disable);
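
An editor's sketch of how a driver in mid-migration might wire these transitional helpers into its plane funcs; the foo_ naming is invented:

/* Editor's sketch: transitional helper wiring for a primary plane. */
static const struct drm_plane_funcs foo_primary_plane_funcs = {
	.update_plane		= drm_plane_helper_update,
	.disable_plane		= drm_plane_helper_disable,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};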
index 78ca30808422fc56c67acb4659f38f06a555b1db..7482b06cd08fc60f35da2099ed0d3ed88cf11802 100644 (file)
@@ -328,7 +328,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
  */
 
 /**
- * drm_gem_prime_export - helper library implemention of the export callback
+ * drm_gem_prime_export - helper library implementation of the export callback
  * @dev: drm_device to export from
  * @obj: GEM object to export
  * @flags: flags like DRM_CLOEXEC
@@ -483,7 +483,7 @@ out_unlock:
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
 /**
- * drm_gem_prime_import - helper library implemention of the import callback
+ * drm_gem_prime_import - helper library implementation of the import callback
  * @dev: drm_device to import into
  * @dma_buf: dma-buf object to import
  *
@@ -669,7 +669,7 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
  * the driver is responsible for mapping the pages into the
  * importers address space for use with dma_buf itself.
  */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
 {
        struct sg_table *sg = NULL;
        int ret;
index 6857e9ad6339c7a8ab94aea908be59323985df44..7483a47de8e41630a37e7db22c5e9585dff11013 100644 (file)
@@ -118,7 +118,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
                mode->status = MODE_UNVERIFIED;
 
        if (connector->force) {
-               if (connector->force == DRM_FORCE_ON)
+               if (connector->force == DRM_FORCE_ON ||
+                   connector->force == DRM_FORCE_ON_DIGITAL)
                        connector->status = connector_status_connected;
                else
                        connector->status = connector_status_disconnected;
index 6adb1e5cfb086499537ea786a13e709da87a59ec..34d46aa75416aaa024e8aec4bb5867c570f3e3fb 100644 (file)
 #include <drm/drm_panel.h>
 #include <drm/bridge/ptn3460.h>
 
-#include "exynos_drm_drv.h"
 #include "exynos_dp_core.h"
 
 #define ctx_from_connector(c)  container_of(c, struct exynos_dp_device, \
                                        connector)
 
+static inline struct exynos_dp_device *
+display_to_dp(struct exynos_drm_display *d)
+{
+       return container_of(d, struct exynos_dp_device, display);
+}
+
 struct bridge_init {
        struct i2c_client *client;
        struct device_node *node;
@@ -882,7 +887,7 @@ static void exynos_dp_hotplug(struct work_struct *work)
 
 static void exynos_dp_commit(struct exynos_drm_display *display)
 {
-       struct exynos_dp_device *dp = display->ctx;
+       struct exynos_dp_device *dp = display_to_dp(display);
        int ret;
 
        /* Keep the panel disabled while we configure video */
@@ -1020,7 +1025,7 @@ static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
 static int exynos_dp_create_connector(struct exynos_drm_display *display,
                                struct drm_encoder *encoder)
 {
-       struct exynos_dp_device *dp = display->ctx;
+       struct exynos_dp_device *dp = display_to_dp(display);
        struct drm_connector *connector = &dp->connector;
        int ret;
 
@@ -1052,33 +1057,19 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
 
 static void exynos_dp_phy_init(struct exynos_dp_device *dp)
 {
-       if (dp->phy) {
+       if (dp->phy)
                phy_power_on(dp->phy);
-       } else if (dp->phy_addr) {
-               u32 reg;
-
-               reg = __raw_readl(dp->phy_addr);
-               reg |= dp->enable_mask;
-               __raw_writel(reg, dp->phy_addr);
-       }
 }
 
 static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
 {
-       if (dp->phy) {
+       if (dp->phy)
                phy_power_off(dp->phy);
-       } else if (dp->phy_addr) {
-               u32 reg;
-
-               reg = __raw_readl(dp->phy_addr);
-               reg &= ~(dp->enable_mask);
-               __raw_writel(reg, dp->phy_addr);
-       }
 }
 
 static void exynos_dp_poweron(struct exynos_drm_display *display)
 {
-       struct exynos_dp_device *dp = display->ctx;
+       struct exynos_dp_device *dp = display_to_dp(display);
 
        if (dp->dpms_mode == DRM_MODE_DPMS_ON)
                return;
@@ -1099,7 +1090,7 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
 
 static void exynos_dp_poweroff(struct exynos_drm_display *display)
 {
-       struct exynos_dp_device *dp = display->ctx;
+       struct exynos_dp_device *dp = display_to_dp(display);
 
        if (dp->dpms_mode != DRM_MODE_DPMS_ON)
                return;
@@ -1124,7 +1115,7 @@ static void exynos_dp_poweroff(struct exynos_drm_display *display)
 
 static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
 {
-       struct exynos_dp_device *dp = display->ctx;
+       struct exynos_dp_device *dp = display_to_dp(display);
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -1147,11 +1138,6 @@ static struct exynos_drm_display_ops exynos_dp_display_ops = {
        .commit = exynos_dp_commit,
 };
 
-static struct exynos_drm_display exynos_dp_display = {
-       .type = EXYNOS_DISPLAY_TYPE_LCD,
-       .ops = &exynos_dp_display_ops,
-};
-
 static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
 {
        struct device_node *dp_node = dev->of_node;
@@ -1210,44 +1196,6 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
        return dp_video_config;
 }
 
-static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
-{
-       struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
-       u32 phy_base;
-       int ret = 0;
-
-       dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
-       if (!dp_phy_node) {
-               dp->phy = devm_phy_get(dp->dev, "dp");
-               return PTR_ERR_OR_ZERO(dp->phy);
-       }
-
-       if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
-               dev_err(dp->dev, "failed to get reg for dptx-phy\n");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
-                               &dp->enable_mask)) {
-               dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       dp->phy_addr = ioremap(phy_base, SZ_4);
-       if (!dp->phy_addr) {
-               dev_err(dp->dev, "failed to ioremap dp-phy\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-
-err:
-       of_node_put(dp_phy_node);
-
-       return ret;
-}
-
 static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
 {
        int ret;
@@ -1263,10 +1211,10 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
 
 static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 {
+       struct exynos_dp_device *dp = dev_get_drvdata(dev);
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm_dev = data;
        struct resource *res;
-       struct exynos_dp_device *dp = exynos_dp_display.ctx;
        unsigned int irq_flags;
        int ret = 0;
 
@@ -1277,9 +1225,21 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
        if (IS_ERR(dp->video_info))
                return PTR_ERR(dp->video_info);
 
-       ret = exynos_dp_dt_parse_phydata(dp);
-       if (ret)
-               return ret;
+       dp->phy = devm_phy_get(dp->dev, "dp");
+       if (IS_ERR(dp->phy)) {
+               dev_err(dp->dev, "no DP phy configured\n");
+               ret = PTR_ERR(dp->phy);
+               if (ret) {
+                       /*
+                        * The phy is not available (no phy support enabled
+                        * or no phy present), so continue with a NULL phy
+                        * pointer.
+                        */
+                       if (ret == -ENOSYS || ret == -ENODEV)
+                               dp->phy = NULL;
+                       else
+                               return ret;
+               }
+       }
 
        if (!dp->panel) {
                ret = exynos_dp_dt_parse_panel(dp);
@@ -1346,17 +1306,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 
        dp->drm_dev = drm_dev;
 
-       platform_set_drvdata(pdev, &exynos_dp_display);
-
-       return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
+       return exynos_drm_create_enc_conn(drm_dev, &dp->display);
 }
 
 static void exynos_dp_unbind(struct device *dev, struct device *master,
                                void *data)
 {
-       struct exynos_drm_display *display = dev_get_drvdata(dev);
+       struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-       exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+       exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
 }
 
 static const struct component_ops exynos_dp_ops = {
@@ -1371,16 +1329,20 @@ static int exynos_dp_probe(struct platform_device *pdev)
        struct exynos_dp_device *dp;
        int ret;
 
-       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
-                                       exynos_dp_display.type);
-       if (ret)
-               return ret;
-
        dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
                                GFP_KERNEL);
        if (!dp)
                return -ENOMEM;
 
+       dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+       dp->display.ops = &exynos_dp_display_ops;
+       platform_set_drvdata(pdev, dp);
+
+       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+                                       dp->display.type);
+       if (ret)
+               return ret;
+
        panel_node = of_parse_phandle(dev->of_node, "panel", 0);
        if (panel_node) {
                dp->panel = of_drm_find_panel(panel_node);
@@ -1389,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
                        return -EPROBE_DEFER;
        }
 
-       exynos_dp_display.ctx = dp;
-
        ret = component_add(&pdev->dev, &exynos_dp_ops);
        if (ret)
                exynos_drm_component_del(&pdev->dev,
@@ -1410,19 +1370,17 @@ static int exynos_dp_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int exynos_dp_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_drm_display *display = platform_get_drvdata(pdev);
+       struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-       exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+       exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
        return 0;
 }
 
 static int exynos_dp_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_drm_display *display = platform_get_drvdata(pdev);
+       struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-       exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
+       exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
        return 0;
 }
 #endif
index a1aee6931bd74afb54928b3b40be150a4dba8483..164f171168e7ef175d9d726e1f1ca231c8da2e49 100644 (file)
@@ -17,6 +17,8 @@
 #include <drm/drm_dp_helper.h>
 #include <drm/exynos_drm.h>
 
+#include "exynos_drm_drv.h"
+
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
@@ -145,6 +147,7 @@ struct link_train {
 };
 
 struct exynos_dp_device {
+       struct exynos_drm_display display;
        struct device           *dev;
        struct drm_device       *drm_dev;
        struct drm_connector    connector;
@@ -153,8 +156,6 @@ struct exynos_dp_device {
        struct clk              *clock;
        unsigned int            irq;
        void __iomem            *reg_base;
-       void __iomem            *phy_addr;
-       unsigned int            enable_mask;
 
        struct video_info       *video_info;
        struct link_train       link_train;
index 690dcddab725658528aa3936298204db206c317c..e353d353836fe87d3f8151de6559db24a9fe70a5 100644 (file)
 #ifndef _EXYNOS_DRM_CRTC_H_
 #define _EXYNOS_DRM_CRTC_H_
 
-struct drm_device;
-struct drm_crtc;
-struct exynos_drm_manager;
-struct exynos_drm_overlay;
+#include "exynos_drm_drv.h"
 
 int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
index 3dc678ed9949241737d1b0055044040d704439dd..37678cf4425ad515eaac8a6c244cd53a5ced3c5e 100644 (file)
@@ -22,6 +22,7 @@
 #include "exynos_drm_drv.h"
 
 struct exynos_dpi {
+       struct exynos_drm_display display;
        struct device *dev;
        struct device_node *panel_node;
 
@@ -35,6 +36,11 @@ struct exynos_dpi {
 
 #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
 
+static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+{
+       return container_of(d, struct exynos_dpi, display);
+}
+
 static enum drm_connector_status
 exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
@@ -100,7 +106,7 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
 static int exynos_dpi_create_connector(struct exynos_drm_display *display,
                                       struct drm_encoder *encoder)
 {
-       struct exynos_dpi *ctx = display->ctx;
+       struct exynos_dpi *ctx = display_to_dpi(display);
        struct drm_connector *connector = &ctx->connector;
        int ret;
 
@@ -141,7 +147,7 @@ static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
 
 static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
 {
-       struct exynos_dpi *ctx = display->ctx;
+       struct exynos_dpi *ctx = display_to_dpi(display);
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -165,11 +171,6 @@ static struct exynos_drm_display_ops exynos_dpi_display_ops = {
        .dpms = exynos_dpi_dpms
 };
 
-static struct exynos_drm_display exynos_dpi_display = {
-       .type = EXYNOS_DISPLAY_TYPE_LCD,
-       .ops = &exynos_dpi_display_ops,
-};
-
 /* of_* functions will be removed after merge of of_graph patches */
 static struct device_node *
 of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
@@ -299,20 +300,21 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
        struct exynos_dpi *ctx;
        int ret;
 
-       ret = exynos_drm_component_add(dev,
-                                       EXYNOS_DEVICE_TYPE_CONNECTOR,
-                                       exynos_dpi_display.type);
-       if (ret)
-               return ERR_PTR(ret);
-
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
-               goto err_del_component;
+               return ERR_PTR(-ENOMEM);
 
+       ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+       ctx->display.ops = &exynos_dpi_display_ops;
        ctx->dev = dev;
-       exynos_dpi_display.ctx = ctx;
        ctx->dpms_mode = DRM_MODE_DPMS_OFF;
 
+       ret = exynos_drm_component_add(dev,
+                                       EXYNOS_DEVICE_TYPE_CONNECTOR,
+                                       ctx->display.type);
+       if (ret)
+               return ERR_PTR(ret);
+
        ret = exynos_dpi_parse_dt(ctx);
        if (ret < 0) {
                devm_kfree(dev, ctx);
@@ -328,7 +330,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
                }
        }
 
-       return &exynos_dpi_display;
+       return &ctx->display;
 
 err_del_component:
        exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
@@ -336,16 +338,16 @@ err_del_component:
        return NULL;
 }
 
-int exynos_dpi_remove(struct device *dev)
+int exynos_dpi_remove(struct exynos_drm_display *display)
 {
-       struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+       struct exynos_dpi *ctx = display_to_dpi(display);
 
-       exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+       exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
 
        if (ctx->panel)
                drm_panel_detach(ctx->panel);
 
-       exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+       exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
 
        return 0;
 }
index c57466edf45b86e39ba292c9d37117a685fdaabd..25ba3628960a3511460b156d1cf264a8eac72ce5 100644 (file)
@@ -203,8 +203,6 @@ static int exynos_drm_resume(struct drm_device *dev)
        }
        drm_modeset_unlock_all(dev);
 
-       drm_helper_resume_force_mode(dev);
-
        return 0;
 }
 
@@ -475,8 +473,6 @@ void exynos_drm_component_del(struct device *dev,
                        list_del(&cdev->list);
                        kfree(cdev);
                }
-
-               break;
        }
 
        mutex_unlock(&drm_component_lock);
@@ -495,6 +491,12 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
 
        mutex_lock(&drm_component_lock);
 
+       /* Do not retry probing if no KMS driver has been registered. */
+       if (list_empty(&drm_component_list)) {
+               mutex_unlock(&drm_component_lock);
+               return ERR_PTR(-ENODEV);
+       }
+
        list_for_each_entry(cdev, &drm_component_list, list) {
                /*
                 * Add components to master only in case that crtc and
@@ -550,183 +552,68 @@ static const struct component_master_ops exynos_drm_ops = {
        .unbind         = exynos_drm_unbind,
 };
 
-static int exynos_drm_platform_probe(struct platform_device *pdev)
-{
-       struct component_match *match;
-       int ret;
-
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
-
+static struct platform_driver *const exynos_drm_kms_drivers[] = {
 #ifdef CONFIG_DRM_EXYNOS_FIMD
-       ret = platform_driver_register(&fimd_driver);
-       if (ret < 0)
-               return ret;
+       &fimd_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_DP
-       ret = platform_driver_register(&dp_driver);
-       if (ret < 0)
-               goto err_unregister_fimd_drv;
+       &dp_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_DSI
-       ret = platform_driver_register(&dsi_driver);
-       if (ret < 0)
-               goto err_unregister_dp_drv;
+       &dsi_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_HDMI
-       ret = platform_driver_register(&mixer_driver);
-       if (ret < 0)
-               goto err_unregister_dsi_drv;
-       ret = platform_driver_register(&hdmi_driver);
-       if (ret < 0)
-               goto err_unregister_mixer_drv;
+       &mixer_driver,
+       &hdmi_driver,
 #endif
+};
 
+static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
 #ifdef CONFIG_DRM_EXYNOS_G2D
-       ret = platform_driver_register(&g2d_driver);
-       if (ret < 0)
-               goto err_unregister_hdmi_drv;
+       &g2d_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_FIMC
-       ret = platform_driver_register(&fimc_driver);
-       if (ret < 0)
-               goto err_unregister_g2d_drv;
+       &fimc_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_ROTATOR
-       ret = platform_driver_register(&rotator_driver);
-       if (ret < 0)
-               goto err_unregister_fimc_drv;
+       &rotator_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_GSC
-       ret = platform_driver_register(&gsc_driver);
-       if (ret < 0)
-               goto err_unregister_rotator_drv;
+       &gsc_driver,
 #endif
-
 #ifdef CONFIG_DRM_EXYNOS_IPP
-       ret = platform_driver_register(&ipp_driver);
-       if (ret < 0)
-               goto err_unregister_gsc_drv;
-
-       ret = exynos_platform_device_ipp_register();
-       if (ret < 0)
-               goto err_unregister_ipp_drv;
+       &ipp_driver,
 #endif
+};
+
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+       struct component_match *match;
+
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
 
        match = exynos_drm_match_add(&pdev->dev);
        if (IS_ERR(match)) {
-               ret = PTR_ERR(match);
-               goto err_unregister_resources;
+               return PTR_ERR(match);
        }
 
-       ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
-                                               match);
-       if (ret < 0)
-               goto err_unregister_resources;
-
-       return ret;
-
-err_unregister_resources:
-
-#ifdef CONFIG_DRM_EXYNOS_IPP
-       exynos_platform_device_ipp_unregister();
-err_unregister_ipp_drv:
-       platform_driver_unregister(&ipp_driver);
-err_unregister_gsc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
-       platform_driver_unregister(&gsc_driver);
-err_unregister_rotator_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
-       platform_driver_unregister(&rotator_driver);
-err_unregister_fimc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
-       platform_driver_unregister(&fimc_driver);
-err_unregister_g2d_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
-       platform_driver_unregister(&g2d_driver);
-err_unregister_hdmi_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_HDMI
-       platform_driver_unregister(&hdmi_driver);
-err_unregister_mixer_drv:
-       platform_driver_unregister(&mixer_driver);
-err_unregister_dsi_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DSI
-       platform_driver_unregister(&dsi_driver);
-err_unregister_dp_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DP
-       platform_driver_unregister(&dp_driver);
-err_unregister_fimd_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
-       platform_driver_unregister(&fimd_driver);
-#endif
-       return ret;
+       return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
+                                              match);
 }
 
 static int exynos_drm_platform_remove(struct platform_device *pdev)
 {
-#ifdef CONFIG_DRM_EXYNOS_IPP
-       exynos_platform_device_ipp_unregister();
-       platform_driver_unregister(&ipp_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
-       platform_driver_unregister(&gsc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
-       platform_driver_unregister(&rotator_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
-       platform_driver_unregister(&fimc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
-       platform_driver_unregister(&g2d_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_HDMI
-       platform_driver_unregister(&mixer_driver);
-       platform_driver_unregister(&hdmi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
-       platform_driver_unregister(&fimd_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DSI
-       platform_driver_unregister(&dsi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DP
-       platform_driver_unregister(&dp_driver);
-#endif
        component_master_del(&pdev->dev, &exynos_drm_ops);
        return 0;
 }
 
+static const char * const strings[] = {
+       "samsung,exynos3",
+       "samsung,exynos4",
+       "samsung,exynos5",
+};
+
 static struct platform_driver exynos_drm_platform_driver = {
        .probe  = exynos_drm_platform_probe,
        .remove = exynos_drm_platform_remove,
@@ -739,31 +626,87 @@ static struct platform_driver exynos_drm_platform_driver = {
 
 static int exynos_drm_init(void)
 {
-       int ret;
+       bool is_exynos = false;
+       int ret, i, j;
+
+       /*
+        * Register the device object only on Exynos SoCs.
+        *
+        * The code below temporarily works around an infinite probe loop
+        * that the Exynos drm driver runs into on multi-platform kernels.
+        * It will be replaced with a more generic mechanism later.
+        */
+       for (i = 0; i < ARRAY_SIZE(strings); i++) {
+               if (of_machine_is_compatible(strings[i])) {
+                       is_exynos = true;
+                       break;
+               }
+       }
+
+       if (!is_exynos)
+               return -ENODEV;
 
        exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
                                                                NULL, 0);
        if (IS_ERR(exynos_drm_pdev))
                return PTR_ERR(exynos_drm_pdev);
 
-#ifdef CONFIG_DRM_EXYNOS_VIDI
        ret = exynos_drm_probe_vidi();
        if (ret < 0)
                goto err_unregister_pd;
+
+       for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
+               ret = platform_driver_register(exynos_drm_kms_drivers[i]);
+               if (ret < 0)
+                       goto err_unregister_kms_drivers;
+       }
+
+       for (j = 0; j < ARRAY_SIZE(exynos_drm_non_kms_drivers); ++j) {
+               ret = platform_driver_register(exynos_drm_non_kms_drivers[j]);
+               if (ret < 0)
+                       goto err_unregister_non_kms_drivers;
+       }
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       ret = exynos_platform_device_ipp_register();
+       if (ret < 0)
+               goto err_unregister_non_kms_drivers;
 #endif
 
        ret = platform_driver_register(&exynos_drm_platform_driver);
        if (ret)
-               goto err_remove_vidi;
+               goto err_unregister_resources;
 
        return 0;
 
-err_remove_vidi:
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+err_unregister_resources:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       exynos_platform_device_ipp_unregister();
+#endif
+
+err_unregister_non_kms_drivers:
+       while (--j >= 0)
+               platform_driver_unregister(exynos_drm_non_kms_drivers[j]);
+
+err_unregister_kms_drivers:
+       while (--i >= 0)
+               platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
        exynos_drm_remove_vidi();
 
 err_unregister_pd:
-#endif
        platform_device_unregister(exynos_drm_pdev);
 
        return ret;
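
The probe/remove rework above replaces the per-driver #ifdef ladders and goto chains with two driver tables plus loop registration in exynos_drm_init(), unwinding in reverse order on failure. A minimal sketch of the same pattern, with hypothetical names (my_drivers, foo_driver, bar_driver and register_all are illustrative, not from this patch):

static struct platform_driver *const my_drivers[] = {
        &foo_driver,
        &bar_driver,
};

static int register_all(void)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(my_drivers); i++) {
                ret = platform_driver_register(my_drivers[i]);
                if (ret < 0)
                        goto unwind;
        }
        return 0;

unwind:
        /* unregister in reverse order; the entry that failed is skipped */
        while (--i >= 0)
                platform_driver_unregister(my_drivers[i]);
        return ret;
}

Later kernels wrap this same idiom in the platform_register_drivers()/platform_unregister_drivers() helpers.
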
@@ -771,10 +714,22 @@ err_unregister_pd:
 
 static void exynos_drm_exit(void)
 {
+       int i;
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       exynos_platform_device_ipp_unregister();
+#endif
+
+       for (i = ARRAY_SIZE(exynos_drm_non_kms_drivers) - 1; i >= 0; --i)
+               platform_driver_unregister(exynos_drm_non_kms_drivers[i]);
+
+       for (i = ARRAY_SIZE(exynos_drm_kms_drivers) - 1; i >= 0; --i)
+               platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
        platform_driver_unregister(&exynos_drm_platform_driver);
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+
        exynos_drm_remove_vidi();
-#endif
+
        platform_device_unregister(exynos_drm_pdev);
 }
 
index d22e640f59a012c0b2e26d3d325c3a6154ec26c5..2e5063488c5026ccd99e717d7e132289ca187dee 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef _EXYNOS_DRM_DRV_H_
 #define _EXYNOS_DRM_DRV_H_
 
+#include <drm/drmP.h>
 #include <linux/module.h>
 
 #define MAX_CRTC       3
 #define MAX_FB_BUFFER  4
 #define DEFAULT_ZPOS   -1
 
-#define _wait_for(COND, MS) ({ \
-       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
-       int ret__ = 0;                                                  \
-       while (!(COND)) {                                               \
-               if (time_after(jiffies, timeout__)) {                   \
-                       ret__ = -ETIMEDOUT;                             \
-                       break;                                          \
-               }                                                       \
-       }                                                               \
-       ret__;                                                          \
-})
-
-#define wait_for(COND, MS) _wait_for(COND, MS)
-
-struct drm_device;
-struct exynos_drm_overlay;
-struct drm_connector;
-
 /* This enumerates device type. */
 enum exynos_drm_device_type {
        EXYNOS_DEVICE_TYPE_NONE,
@@ -83,10 +66,10 @@ enum exynos_drm_output_type {
  * @dma_addr: array of bus (dma-accessible) addresses of the memory region
  *           allocated for an overlay.
  * @zpos: order of overlay layer(z position).
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
  * @index_color: if using color key feature then this value would be used
  *                     as index color.
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
  * @local_path: in case of lcd type, local path mode on or off.
  * @transparency: transparency on or off.
  * @activated: activated or not.
@@ -114,19 +97,20 @@ struct exynos_drm_overlay {
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
        int zpos;
-
-       bool default_win;
-       bool color_key;
        unsigned int index_color;
-       bool local_path;
-       bool transparency;
-       bool activated;
+
+       bool default_win:1;
+       bool color_key:1;
+       bool local_path:1;
+       bool transparency:1;
+       bool activated:1;
 };
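
Turning the five overlay flags into single-bit bitfields packs them into one storage unit instead of five separate bools; the exact sizes are implementation-defined, so the numbers below are typical rather than guaranteed. A standalone illustration (both structs are hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct flags_plain  { bool a, b, c, d, e; };            /* commonly 5 bytes */
struct flags_packed { bool a:1, b:1, c:1, d:1, e:1; };  /* commonly 1 byte  */

int main(void)
{
        printf("plain=%zu packed=%zu\n",
               sizeof(struct flags_plain), sizeof(struct flags_packed));
        return 0;
}

The trade-off is that bitfield members cannot have their address taken.
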
 
 /*
  * Exynos DRM Display Structure.
  *     - this structure is common to analog tv, digital tv and lcd panel.
  *
+ * @create_connector: initialize and register a new connector
  * @remove: cleans up the display for removal
  * @mode_fixup: fix mode data comparing to hw specific display mode.
  * @mode_set: convert drm_display_mode to hw specific display mode and
@@ -168,7 +152,6 @@ struct exynos_drm_display {
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        struct exynos_drm_display_ops *ops;
-       void *ctx;
 };
 
 /*
@@ -227,7 +210,6 @@ struct exynos_drm_manager {
        struct drm_crtc *crtc;
        int pipe;
        struct exynos_drm_manager_ops *ops;
-       void *ctx;
 };
 
 struct exynos_drm_g2d_private {
@@ -279,8 +261,6 @@ struct exynos_drm_private {
  * @dev: pointer to device object for subdrv device driver.
  * @drm_dev: pointer to drm_device and this pointer would be set
  *     when sub driver calls exynos_drm_subdrv_register().
- * @manager: subdrv has its own manager to control a hardware appropriately
- *     and we can access a hardware drawing on this manager.
  * @probe: this callback would be called by exynos drm driver after
  *     subdrv is registered to it.
  * @remove: this callback is used to release resources created
@@ -312,45 +292,34 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
-/*
- * this function registers exynos drm hdmi platform device. It ensures only one
- * instance of the device is created.
- */
-int exynos_platform_device_hdmi_register(void);
-
-/*
- * this function unregisters exynos drm hdmi platform device if it exists.
- */
-void exynos_platform_device_hdmi_unregister(void);
-
-/*
- * this function registers exynos drm ipp platform device.
- */
+#ifdef CONFIG_DRM_EXYNOS_IPP
 int exynos_platform_device_ipp_register(void);
-
-/*
- * this function unregisters exynos drm ipp platform device if it exists.
- */
 void exynos_platform_device_ipp_unregister(void);
+#else
+static inline int exynos_platform_device_ipp_register(void) { return 0; }
+static inline void exynos_platform_device_ipp_unregister(void) {}
+#endif
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
 struct exynos_drm_display *exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct device *dev);
+int exynos_dpi_remove(struct exynos_drm_display *display);
 #else
 static inline struct exynos_drm_display *
 exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct device *dev) { return 0; }
+static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+{
+       return 0;
+}
 #endif
 
-/*
- * this function registers exynos drm vidi platform device/driver.
- */
+#ifdef CONFIG_DRM_EXYNOS_VIDI
 int exynos_drm_probe_vidi(void);
-
-/*
- * this function unregister exynos drm vidi platform device/driver.
- */
 void exynos_drm_remove_vidi(void);
+#else
+static inline int exynos_drm_probe_vidi(void) { return 0; }
+static inline void exynos_drm_remove_vidi(void) {}
+#endif
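
The IPP and VIDI declarations above now follow the standard optional-feature header pattern: real prototypes when the Kconfig option is enabled, static inline no-ops otherwise, so call sites no longer need #ifdef guards of their own. The generic shape (CONFIG_MY_FEATURE and the function names are made up for illustration):

#ifdef CONFIG_MY_FEATURE
int my_feature_register(void);
void my_feature_unregister(void);
#else
static inline int my_feature_register(void) { return 0; }
static inline void my_feature_unregister(void) { }
#endif
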
 
 /* This function creates an encoder and a connector, and initializes them. */
 int exynos_drm_create_enc_conn(struct drm_device *dev,
index acf7e9e39dcd86e11590d7398ee136a9f81d8a60..05fe93dc57a8bccdd14b6db9cb40f9ef99944619 100644 (file)
@@ -268,9 +268,9 @@ struct exynos_dsi_driver_data {
 };
 
 struct exynos_dsi {
+       struct exynos_drm_display display;
        struct mipi_dsi_host dsi_host;
        struct drm_connector connector;
-       struct drm_encoder *encoder;
        struct device_node *panel_node;
        struct drm_panel *panel;
        struct device *dev;
@@ -304,6 +304,11 @@ struct exynos_dsi {
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
+static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+{
+       return container_of(d, struct exynos_dsi, display);
+}
+
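
display_to_dsi() is the core of this series' refactor: instead of stashing driver state behind a void *ctx pointer, the generic struct exynos_drm_display is embedded in the driver struct and the wrapper is recovered with container_of(). A self-contained userspace illustration of the same mechanics (all names hypothetical; the kernel's container_of() additionally does type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct display { int type; };

struct dsi {
        struct display display;  /* embedded generic object */
        int irq;                 /* driver-private state */
};

int main(void)
{
        struct dsi dsi = { .display = { .type = 1 }, .irq = 42 };
        struct display *d = &dsi.display;  /* generic handle passed around */
        struct dsi *back = container_of(d, struct dsi, display);

        printf("irq=%d\n", back->irq);     /* prints irq=42 */
        return 0;
}
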
 static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
        .plltmr_reg = 0x50,
        .has_freqband = 1,
@@ -316,6 +321,11 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
        .has_clklane_stop = 1,
 };
 
+static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
+       .plltmr_reg = 0x58,
+       .has_clklane_stop = 1,
+};
+
 static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
        .plltmr_reg = 0x58,
 };
@@ -325,6 +335,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
          .data = &exynos3_dsi_driver_data },
        { .compatible = "samsung,exynos4210-mipi-dsi",
          .data = &exynos4_dsi_driver_data },
+       { .compatible = "samsung,exynos4415-mipi-dsi",
+         .data = &exynos4415_dsi_driver_data },
        { .compatible = "samsung,exynos5410-mipi-dsi",
          .data = &exynos5_dsi_driver_data },
        { }
@@ -1104,7 +1116,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
 static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
 {
        struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
-       struct drm_encoder *encoder = dsi->encoder;
+       struct drm_encoder *encoder = dsi->display.encoder;
 
        if (dsi->state & DSIM_STATE_ENABLED)
                exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1143,6 +1155,7 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
 static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
 {
        int ret;
+       int te_gpio_irq;
 
        dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
        if (!gpio_is_valid(dsi->te_gpio)) {
@@ -1157,14 +1170,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
                goto out;
        }
 
-       /*
-        * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because panel
-        * calls drm_panel_init() first then calls mipi_dsi_attach() in probe().
-        * It means that te_gpio is invalid when exynos_dsi_enable_irq() is
-        * called by drm_panel_init() before panel is attached.
-        */
-       ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
-                                       exynos_dsi_te_irq_handler, NULL,
+       te_gpio_irq = gpio_to_irq(dsi->te_gpio);
+
+       irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
+       ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
                                        IRQF_TRIGGER_RISING, "TE", dsi);
        if (ret) {
                dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
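
The TE interrupt is now requested with IRQ_NOAUTOEN set, so the line stays masked from registration until the driver explicitly enables it, rather than firing before the panel is attached. A sketch of the idiom, assuming the consumer enables the line via enable_irq() once it is ready (register_te_irq and its arguments are illustrative):

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int register_te_irq(int gpio, irq_handler_t handler, void *ctx)
{
        int irq = gpio_to_irq(gpio);
        int ret;

        if (irq < 0)
                return irq;

        /* keep the line masked until it is explicitly enabled */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
        ret = request_threaded_irq(irq, handler, NULL,
                                   IRQF_TRIGGER_RISING, "TE", ctx);
        if (ret)
                return ret;

        /* normally deferred until the consumer is ready to take interrupts */
        enable_irq(irq);
        return 0;
}
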
@@ -1195,9 +1204,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
        dsi->mode_flags = device->mode_flags;
        dsi->panel_node = device->dev.of_node;
 
-       if (dsi->connector.dev)
-               drm_helper_hpd_irq_event(dsi->connector.dev);
-
        /*
         * This is a temporary solution and should be replaced with a more
         *
@@ -1211,6 +1217,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
                        return ret;
        }
 
+       if (dsi->connector.dev)
+               drm_helper_hpd_irq_event(dsi->connector.dev);
+
        return 0;
 }
 
@@ -1236,7 +1245,7 @@ static bool exynos_dsi_is_short_dsi_type(u8 type)
 }
 
 static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
-                                      struct mipi_dsi_msg *msg)
+                                       const struct mipi_dsi_msg *msg)
 {
        struct exynos_dsi *dsi = host_to_dsi(host);
        struct exynos_dsi_transfer xfer;
@@ -1369,16 +1378,17 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
        exynos_dsi_set_display_mode(dsi);
        exynos_dsi_set_display_enable(dsi, true);
 
+       dsi->state |= DSIM_STATE_ENABLED;
+
        ret = drm_panel_enable(dsi->panel);
        if (ret < 0) {
+               dsi->state &= ~DSIM_STATE_ENABLED;
                exynos_dsi_set_display_enable(dsi, false);
                drm_panel_unprepare(dsi->panel);
                exynos_dsi_poweroff(dsi);
                return ret;
        }
 
-       dsi->state |= DSIM_STATE_ENABLED;
-
        return 0;
 }
 
@@ -1397,7 +1407,7 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
 
 static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
 {
-       struct exynos_dsi *dsi = display->ctx;
+       struct exynos_dsi *dsi = display_to_dsi(display);
 
        if (dsi->panel) {
                switch (mode) {
@@ -1474,7 +1484,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
 {
        struct exynos_dsi *dsi = connector_to_dsi(connector);
 
-       return dsi->encoder;
+       return dsi->display.encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1486,12 +1496,10 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
 static int exynos_dsi_create_connector(struct exynos_drm_display *display,
                                       struct drm_encoder *encoder)
 {
-       struct exynos_dsi *dsi = display->ctx;
+       struct exynos_dsi *dsi = display_to_dsi(display);
        struct drm_connector *connector = &dsi->connector;
        int ret;
 
-       dsi->encoder = encoder;
-
        connector->polled = DRM_CONNECTOR_POLL_HPD;
 
        ret = drm_connector_init(encoder->dev, connector,
@@ -1512,7 +1520,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
 static void exynos_dsi_mode_set(struct exynos_drm_display *display,
                         struct drm_display_mode *mode)
 {
-       struct exynos_dsi *dsi = display->ctx;
+       struct exynos_dsi *dsi = display_to_dsi(display);
        struct videomode *vm = &dsi->vm;
 
        vm->hactive = mode->hdisplay;
@@ -1531,10 +1539,6 @@ static struct exynos_drm_display_ops exynos_dsi_display_ops = {
        .dpms = exynos_dsi_dpms
 };
 
-static struct exynos_drm_display exynos_dsi_display = {
-       .type = EXYNOS_DISPLAY_TYPE_LCD,
-       .ops = &exynos_dsi_display_ops,
-};
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
 
 /* of_* functions will be removed after merge of of_graph patches */
@@ -1640,28 +1644,28 @@ end:
 static int exynos_dsi_bind(struct device *dev, struct device *master,
                                void *data)
 {
+       struct exynos_drm_display *display = dev_get_drvdata(dev);
+       struct exynos_dsi *dsi = display_to_dsi(display);
        struct drm_device *drm_dev = data;
-       struct exynos_dsi *dsi;
        int ret;
 
-       ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
+       ret = exynos_drm_create_enc_conn(drm_dev, display);
        if (ret) {
                DRM_ERROR("Encoder create [%d] failed with %d\n",
-                               exynos_dsi_display.type, ret);
+                         display->type, ret);
                return ret;
        }
 
-       dsi = exynos_dsi_display.ctx;
-
        return mipi_dsi_host_register(&dsi->dsi_host);
 }
 
 static void exynos_dsi_unbind(struct device *dev, struct device *master,
                                void *data)
 {
-       struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+       struct exynos_drm_display *display = dev_get_drvdata(dev);
+       struct exynos_dsi *dsi = display_to_dsi(display);
 
-       exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+       exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
 
        mipi_dsi_host_unregister(&dsi->dsi_host);
 }
@@ -1673,22 +1677,23 @@ static const struct component_ops exynos_dsi_component_ops = {
 
 static int exynos_dsi_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct resource *res;
        struct exynos_dsi *dsi;
        int ret;
 
-       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
-                                       exynos_dsi_display.type);
+       dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+       dsi->display.ops = &exynos_dsi_display_ops;
+
+       ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+                                      dsi->display.type);
        if (ret)
                return ret;
 
-       dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
-       if (!dsi) {
-               dev_err(&pdev->dev, "failed to allocate dsi object.\n");
-               ret = -ENOMEM;
-               goto err_del_component;
-       }
-
        /* Mark the TE GPIO as invalid until it is looked up */
        dsi->te_gpio = -ENOENT;
 
@@ -1697,9 +1702,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&dsi->transfer_list);
 
        dsi->dsi_host.ops = &exynos_dsi_ops;
-       dsi->dsi_host.dev = &pdev->dev;
+       dsi->dsi_host.dev = dev;
 
-       dsi->dev = &pdev->dev;
+       dsi->dev = dev;
        dsi->driver_data = exynos_dsi_get_driver_data(pdev);
 
        ret = exynos_dsi_parse_dt(dsi);
@@ -1708,70 +1713,68 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
        dsi->supplies[0].supply = "vddcore";
        dsi->supplies[1].supply = "vddio";
-       ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
                                      dsi->supplies);
        if (ret) {
-               dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
+               dev_info(dev, "failed to get regulators: %d\n", ret);
                return -EPROBE_DEFER;
        }
 
-       dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+       dsi->pll_clk = devm_clk_get(dev, "pll_clk");
        if (IS_ERR(dsi->pll_clk)) {
-               dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
+               dev_info(dev, "failed to get dsi pll input clock\n");
                ret = PTR_ERR(dsi->pll_clk);
                goto err_del_component;
        }
 
-       dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+       dsi->bus_clk = devm_clk_get(dev, "bus_clk");
        if (IS_ERR(dsi->bus_clk)) {
-               dev_info(&pdev->dev, "failed to get dsi bus clock\n");
+               dev_info(dev, "failed to get dsi bus clock\n");
                ret = PTR_ERR(dsi->bus_clk);
                goto err_del_component;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+       dsi->reg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(dsi->reg_base)) {
-               dev_err(&pdev->dev, "failed to remap io region\n");
+               dev_err(dev, "failed to remap io region\n");
                ret = PTR_ERR(dsi->reg_base);
                goto err_del_component;
        }
 
-       dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+       dsi->phy = devm_phy_get(dev, "dsim");
        if (IS_ERR(dsi->phy)) {
-               dev_info(&pdev->dev, "failed to get dsim phy\n");
+               dev_info(dev, "failed to get dsim phy\n");
                ret = PTR_ERR(dsi->phy);
                goto err_del_component;
        }
 
        dsi->irq = platform_get_irq(pdev, 0);
        if (dsi->irq < 0) {
-               dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+               dev_err(dev, "failed to request dsi irq resource\n");
                ret = dsi->irq;
                goto err_del_component;
        }
 
        irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
-       ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+       ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
                                        exynos_dsi_irq, IRQF_ONESHOT,
-                                       dev_name(&pdev->dev), dsi);
+                                       dev_name(dev), dsi);
        if (ret) {
-               dev_err(&pdev->dev, "failed to request dsi irq\n");
+               dev_err(dev, "failed to request dsi irq\n");
                goto err_del_component;
        }
 
-       exynos_dsi_display.ctx = dsi;
-
-       platform_set_drvdata(pdev, &exynos_dsi_display);
+       platform_set_drvdata(pdev, &dsi->display);
 
-       ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
+       ret = component_add(dev, &exynos_dsi_component_ops);
        if (ret)
                goto err_del_component;
 
        return ret;
 
 err_del_component:
-       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+       exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
        return ret;
 }
 
index b7a1620a7e79cfff9abff9eec3f283316f524249..26305d8dd93a4a64060ca53609cdec2f1bd77eb4 100644 (file)
@@ -14,8 +14,6 @@
 #ifndef _EXYNOS_DRM_ENCODER_H_
 #define _EXYNOS_DRM_ENCODER_H_
 
-struct exynos_drm_manager;
-
 void exynos_drm_encoder_setup(struct drm_device *dev);
 struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
                        struct exynos_drm_display *mgr,
index 085b066a99934116963b0aa3a1fe6d4a136387c2..e5810d13bf9c5377b9faa5afc0fc333bd6d2ab17 100644 (file)
@@ -84,8 +84,6 @@
 /* FIMD has five hardware windows in total. */
 #define WINDOWS_NR     5
 
-#define get_fimd_manager(mgr)  platform_get_drvdata(to_platform_device(dev))
-
 struct fimd_driver_data {
        unsigned int timing_base;
        unsigned int lcdblk_offset;
@@ -96,6 +94,7 @@ struct fimd_driver_data {
        unsigned int has_clksel:1;
        unsigned int has_limited_fmt:1;
        unsigned int has_vidoutcon:1;
+       unsigned int has_vtsel:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -118,6 +117,17 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
        .lcdblk_vt_shift = 10,
        .lcdblk_bypass_shift = 1,
        .has_shadowcon = 1,
+       .has_vtsel = 1,
+};
+
+static struct fimd_driver_data exynos4415_fimd_driver_data = {
+       .timing_base = 0x20000,
+       .lcdblk_offset = 0x210,
+       .lcdblk_vt_shift = 10,
+       .lcdblk_bypass_shift = 1,
+       .has_shadowcon = 1,
+       .has_vidoutcon = 1,
+       .has_vtsel = 1,
 };
 
 static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -127,6 +137,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
        .lcdblk_bypass_shift = 15,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
+       .has_vtsel = 1,
 };
 
 struct fimd_win_data {
@@ -146,6 +157,7 @@ struct fimd_win_data {
 };
 
 struct fimd_context {
+       struct exynos_drm_manager       manager;
        struct device                   *dev;
        struct drm_device               *drm_dev;
        struct clk                      *bus_clk;
@@ -173,6 +185,11 @@ struct fimd_context {
        struct exynos_drm_display *display;
 };
 
+static inline struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
+{
+       return container_of(mgr, struct fimd_context, manager);
+}
+
 static const struct of_device_id fimd_driver_dt_match[] = {
        { .compatible = "samsung,s3c6400-fimd",
          .data = &s3c64xx_fimd_driver_data },
@@ -180,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = {
          .data = &exynos3_fimd_driver_data },
        { .compatible = "samsung,exynos4210-fimd",
          .data = &exynos4_fimd_driver_data },
+       { .compatible = "samsung,exynos4415-fimd",
+         .data = &exynos4415_fimd_driver_data },
        { .compatible = "samsung,exynos5250-fimd",
          .data = &exynos5_fimd_driver_data },
        {},
@@ -197,7 +216,7 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 
 static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
 
        if (ctx->suspended)
                return;
@@ -214,9 +233,35 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
                DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
+static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+                                       bool enable)
+{
+       u32 val = readl(ctx->regs + WINCON(win));
+
+       if (enable)
+               val |= WINCONx_ENWIN;
+       else
+               val &= ~WINCONx_ENWIN;
+
+       writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+                                               bool enable)
+{
+       u32 val = readl(ctx->regs + SHADOWCON);
+
+       if (enable)
+               val |= SHADOWCON_CHx_ENABLE(win);
+       else
+               val &= ~SHADOWCON_CHx_ENABLE(win);
+
+       writel(val, ctx->regs + SHADOWCON);
+}
+
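
fimd_enable_video_output() and fimd_enable_shadow_channel_path() factor out the read-modify-write sequences that were previously open-coded at every call site. The underlying idiom in generic form (set_reg_bits is a hypothetical helper):

static void set_reg_bits(void __iomem *reg, u32 bits, bool enable)
{
        u32 val = readl(reg);

        if (enable)
                val |= bits;
        else
                val &= ~bits;

        writel(val, reg);
}
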
 static void fimd_clear_channel(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        int win, ch_enabled = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -226,16 +271,12 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
                u32 val = readl(ctx->regs + WINCON(win));
 
                if (val & WINCONx_ENWIN) {
-                       /* wincon */
-                       val &= ~WINCONx_ENWIN;
-                       writel(val, ctx->regs + WINCON(win));
-
-                       /* unprotect windows */
-                       if (ctx->driver_data->has_shadowcon) {
-                               val = readl(ctx->regs + SHADOWCON);
-                               val &= ~SHADOWCON_CHx_ENABLE(win);
-                               writel(val, ctx->regs + SHADOWCON);
-                       }
+                       fimd_enable_video_output(ctx, win, false);
+
+                       if (ctx->driver_data->has_shadowcon)
+                               fimd_enable_shadow_channel_path(ctx, win,
+                                                               false);
+
                        ch_enabled = 1;
                }
        }
@@ -253,7 +294,7 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
 static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
                        struct drm_device *drm_dev)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct exynos_drm_private *priv;
        priv = drm_dev->dev_private;
 
@@ -275,7 +316,7 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
 
 static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
 
        /* detach this sub driver from iommu mapping if supported. */
        if (is_drm_iommu_supported(ctx->drm_dev))
@@ -315,14 +356,14 @@ static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
 static void fimd_mode_set(struct exynos_drm_manager *mgr,
                const struct drm_display_mode *in_mode)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
 
        drm_mode_copy(&ctx->mode, in_mode);
 }
 
 static void fimd_commit(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct drm_display_mode *mode = &ctx->mode;
        struct fimd_driver_data *driver_data = ctx->driver_data;
        void *timing_base = ctx->regs + driver_data->timing_base;
@@ -343,7 +384,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
                writel(0, timing_base + I80IFCONFBx(0));
 
                /* set video type selection to I80 interface */
-               if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+               if (driver_data->has_vtsel && ctx->sysreg &&
+                               regmap_update_bits(ctx->sysreg,
                                        driver_data->lcdblk_offset,
                                        0x3 << driver_data->lcdblk_vt_shift,
                                        0x1 << driver_data->lcdblk_vt_shift)) {
@@ -421,7 +463,7 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
 
 static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        u32 val;
 
        if (ctx->suspended)
@@ -431,12 +473,19 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
                val = readl(ctx->regs + VIDINTCON0);
 
                val |= VIDINTCON0_INT_ENABLE;
-               val |= VIDINTCON0_INT_FRAME;
 
-               val &= ~VIDINTCON0_FRAMESEL0_MASK;
-               val |= VIDINTCON0_FRAMESEL0_VSYNC;
-               val &= ~VIDINTCON0_FRAMESEL1_MASK;
-               val |= VIDINTCON0_FRAMESEL1_NONE;
+               if (ctx->i80_if) {
+                       val |= VIDINTCON0_INT_I80IFDONE;
+                       val |= VIDINTCON0_INT_SYSMAINCON;
+                       val &= ~VIDINTCON0_INT_SYSSUBCON;
+               } else {
+                       val |= VIDINTCON0_INT_FRAME;
+
+                       val &= ~VIDINTCON0_FRAMESEL0_MASK;
+                       val |= VIDINTCON0_FRAMESEL0_VSYNC;
+                       val &= ~VIDINTCON0_FRAMESEL1_MASK;
+                       val |= VIDINTCON0_FRAMESEL1_NONE;
+               }
 
                writel(val, ctx->regs + VIDINTCON0);
        }
@@ -446,7 +495,7 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
 
 static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        u32 val;
 
        if (ctx->suspended)
@@ -455,9 +504,15 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
        if (test_and_clear_bit(0, &ctx->irq_flags)) {
                val = readl(ctx->regs + VIDINTCON0);
 
-               val &= ~VIDINTCON0_INT_FRAME;
                val &= ~VIDINTCON0_INT_ENABLE;
 
+               if (ctx->i80_if) {
+                       val &= ~VIDINTCON0_INT_I80IFDONE;
+                       val &= ~VIDINTCON0_INT_SYSMAINCON;
+                       val &= ~VIDINTCON0_INT_SYSSUBCON;
+               } else {
+                       val &= ~VIDINTCON0_INT_FRAME;
+               }
+
                writel(val, ctx->regs + VIDINTCON0);
        }
 }
@@ -465,7 +520,7 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
 static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
                        struct exynos_drm_overlay *overlay)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int win;
        unsigned long offset;
@@ -623,7 +678,7 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
 
 static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int win = zpos;
        unsigned long val, alpha, size;
@@ -730,20 +785,14 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
        if (win != 0)
                fimd_win_set_colkey(ctx, win);
 
-       /* wincon */
-       val = readl(ctx->regs + WINCON(win));
-       val |= WINCONx_ENWIN;
-       writel(val, ctx->regs + WINCON(win));
+       fimd_enable_video_output(ctx, win, true);
+
+       if (ctx->driver_data->has_shadowcon)
+               fimd_enable_shadow_channel_path(ctx, win, true);
 
        /* Enable DMA channel and unprotect windows */
        fimd_shadow_protect_win(ctx, win, false);
 
-       if (ctx->driver_data->has_shadowcon) {
-               val = readl(ctx->regs + SHADOWCON);
-               val |= SHADOWCON_CHx_ENABLE(win);
-               writel(val, ctx->regs + SHADOWCON);
-       }
-
        win_data->enabled = true;
 
        if (ctx->i80_if)
@@ -752,10 +801,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int win = zpos;
-       u32 val;
 
        if (win == DEFAULT_ZPOS)
                win = ctx->default_win;
@@ -774,18 +822,12 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
        /* protect windows */
        fimd_shadow_protect_win(ctx, win, true);
 
-       /* wincon */
-       val = readl(ctx->regs + WINCON(win));
-       val &= ~WINCONx_ENWIN;
-       writel(val, ctx->regs + WINCON(win));
+       fimd_enable_video_output(ctx, win, false);
 
-       /* unprotect windows */
-       if (ctx->driver_data->has_shadowcon) {
-               val = readl(ctx->regs + SHADOWCON);
-               val &= ~SHADOWCON_CHx_ENABLE(win);
-               writel(val, ctx->regs + SHADOWCON);
-       }
+       if (ctx->driver_data->has_shadowcon)
+               fimd_enable_shadow_channel_path(ctx, win, false);
 
+       /* unprotect windows */
        fimd_shadow_protect_win(ctx, win, false);
 
        win_data->enabled = false;
@@ -793,7 +835,7 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
 
 static void fimd_window_suspend(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int i;
 
@@ -803,12 +845,11 @@ static void fimd_window_suspend(struct exynos_drm_manager *mgr)
                if (win_data->enabled)
                        fimd_win_disable(mgr, i);
        }
-       fimd_wait_for_vblank(mgr);
 }
 
 static void fimd_window_resume(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int i;
 
@@ -821,7 +862,7 @@ static void fimd_window_resume(struct exynos_drm_manager *mgr)
 
 static void fimd_apply(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        struct fimd_win_data *win_data;
        int i;
 
@@ -838,7 +879,7 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
 
 static int fimd_poweron(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
        int ret;
 
        if (!ctx->suspended)
@@ -886,7 +927,7 @@ bus_clk_err:
 
 static int fimd_poweroff(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
 
        if (ctx->suspended)
                return 0;
@@ -928,39 +969,41 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
 
 static void fimd_trigger(struct device *dev)
 {
-       struct exynos_drm_manager *mgr = get_fimd_manager(dev);
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = dev_get_drvdata(dev);
        struct fimd_driver_data *driver_data = ctx->driver_data;
        void *timing_base = ctx->regs + driver_data->timing_base;
        u32 reg;
 
-       atomic_set(&ctx->triggering, 1);
+       /*
+        * Skip triggering if already in the triggering state, because
+        * multiple trigger requests can cause a panel reset.
+        */
+       if (atomic_read(&ctx->triggering))
+               return;
 
-       reg = readl(ctx->regs + VIDINTCON0);
-       reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
-                                               VIDINTCON0_INT_SYSMAINCON);
-       writel(reg, ctx->regs + VIDINTCON0);
+       /* Enters triggering mode */
+       atomic_set(&ctx->triggering, 1);
 
        reg = readl(timing_base + TRIGCON);
        reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
        writel(reg, timing_base + TRIGCON);
+
+       /*
+        * Exit triggering mode right away if vblank is not enabled yet:
+        * without the VIDINTCON0 interrupt configured, there is no frame-done
+        * interrupt to clear the triggering state later.
+        */
+       if (!test_bit(0, &ctx->irq_flags))
+               atomic_set(&ctx->triggering, 0);
 }
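
fimd_trigger() now bails out while a trigger is already in flight, since overlapping triggers can reset the panel. Note that atomic_read() followed by atomic_set() is not an atomic test-and-set; if two contexts could race into this function, a cmpxchg-based guard would close the window. A sketch of that alternative (this is not what the patch uses):

/* claim the trigger slot atomically; give up if another context holds it */
if (atomic_cmpxchg(&ctx->triggering, 0, 1) != 0)
        return;
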
 
 static void fimd_te_handler(struct exynos_drm_manager *mgr)
 {
-       struct fimd_context *ctx = mgr->ctx;
+       struct fimd_context *ctx = mgr_to_fimd(mgr);
 
        /* Check whether the crtc has already been detached from the encoder */
        if (ctx->pipe < 0 || !ctx->drm_dev)
                return;
 
-        /*
-        * Skips to trigger if in triggering state, because multiple triggering
-        * requests can cause panel reset.
-        */
-       if (atomic_read(&ctx->triggering))
-               return;
-
        /*
         * If there is a page flip request, trigger and handle the page flip
         * event so that the current fb can be updated into the panel GRAM.
@@ -972,10 +1015,10 @@ static void fimd_te_handler(struct exynos_drm_manager *mgr)
        if (atomic_read(&ctx->wait_vsync_event)) {
                atomic_set(&ctx->wait_vsync_event, 0);
                wake_up(&ctx->wait_vsync_queue);
-
-               if (!atomic_read(&ctx->triggering))
-                       drm_handle_vblank(ctx->drm_dev, ctx->pipe);
        }
+
+       if (test_bit(0, &ctx->irq_flags))
+               drm_handle_vblank(ctx->drm_dev, ctx->pipe);
 }
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
@@ -992,11 +1035,6 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
        .te_handler = fimd_te_handler,
 };
 
-static struct exynos_drm_manager fimd_manager = {
-       .type = EXYNOS_DISPLAY_TYPE_LCD,
-       .ops = &fimd_manager_ops,
-};
-
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
        struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -1013,16 +1051,10 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
                goto out;
 
        if (ctx->i80_if) {
-               /* unset I80 frame done interrupt */
-               val = readl(ctx->regs + VIDINTCON0);
-               val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
-               writel(val, ctx->regs + VIDINTCON0);
+               exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
 
-               /* exit triggering mode */
+               /* Exits triggering mode */
                atomic_set(&ctx->triggering, 0);
-
-               drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-               exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
        } else {
                drm_handle_vblank(ctx->drm_dev, ctx->pipe);
                exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
@@ -1040,11 +1072,11 @@ out:
 
 static int fimd_bind(struct device *dev, struct device *master, void *data)
 {
-       struct fimd_context *ctx = fimd_manager.ctx;
+       struct fimd_context *ctx = dev_get_drvdata(dev);
        struct drm_device *drm_dev = data;
 
-       fimd_mgr_initialize(&fimd_manager, drm_dev);
-       exynos_drm_crtc_create(&fimd_manager);
+       fimd_mgr_initialize(&ctx->manager, drm_dev);
+       exynos_drm_crtc_create(&ctx->manager);
        if (ctx->display)
                exynos_drm_create_enc_conn(drm_dev, ctx->display);
 
@@ -1055,15 +1087,14 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 static void fimd_unbind(struct device *dev, struct device *master,
                        void *data)
 {
-       struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
-       struct fimd_context *ctx = fimd_manager.ctx;
+       struct fimd_context *ctx = dev_get_drvdata(dev);
 
-       fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
+       fimd_dpms(&ctx->manager, DRM_MODE_DPMS_OFF);
 
        if (ctx->display)
-               exynos_dpi_remove(dev);
+               exynos_dpi_remove(ctx->display);
 
-       fimd_mgr_remove(mgr);
+       fimd_mgr_remove(&ctx->manager);
 }
 
 static const struct component_ops fimd_component_ops = {
@@ -1079,21 +1110,20 @@ static int fimd_probe(struct platform_device *pdev)
        struct resource *res;
        int ret = -EINVAL;
 
-       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
-                                       fimd_manager.type);
-       if (ret)
-               return ret;
-
-       if (!dev->of_node) {
-               ret = -ENODEV;
-               goto err_del_component;
-       }
+       if (!dev->of_node)
+               return -ENODEV;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-       if (!ctx) {
-               ret = -ENOMEM;
-               goto err_del_component;
-       }
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->manager.type = EXYNOS_DISPLAY_TYPE_LCD;
+       ctx->manager.ops = &fimd_manager_ops;
+
+       ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
+                                      ctx->manager.type);
+       if (ret)
+               return ret;
 
        ctx->dev = dev;
        ctx->suspended = true;
@@ -1182,27 +1212,27 @@ static int fimd_probe(struct platform_device *pdev)
        init_waitqueue_head(&ctx->wait_vsync_queue);
        atomic_set(&ctx->wait_vsync_event, 0);
 
-       platform_set_drvdata(pdev, &fimd_manager);
-
-       fimd_manager.ctx = ctx;
+       platform_set_drvdata(pdev, ctx);
 
        ctx->display = exynos_dpi_probe(dev);
-       if (IS_ERR(ctx->display))
-               return PTR_ERR(ctx->display);
+       if (IS_ERR(ctx->display)) {
+               ret = PTR_ERR(ctx->display);
+               goto err_del_component;
+       }
 
-       pm_runtime_enable(&pdev->dev);
+       pm_runtime_enable(dev);
 
-       ret = component_add(&pdev->dev, &fimd_component_ops);
+       ret = component_add(dev, &fimd_component_ops);
        if (ret)
                goto err_disable_pm_runtime;
 
        return ret;
 
 err_disable_pm_runtime:
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_disable(dev);
 
 err_del_component:
-       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+       exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
        return ret;
 }
 
index df7a77d3eff84b769342f04114a31a040a843769..6ff8599f6cbf0beb09dc48a5a1b1c7b110a739bf 100644 (file)
@@ -302,9 +302,12 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
        struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 
        kfree(g2d->cmdlist_node);
-       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
-                       g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+
+       if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
+               dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+                               g2d->cmdlist_pool_virt,
+                               g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+       }
 }
 
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
index 72376d41c5129c609be952f623be43b2e44d949c..35d25889b476ebe281439e0c3363e61dd7b2ab5f 100644 (file)
@@ -40,7 +40,6 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 
 #else
 
-struct dma_iommu_mapping;
 static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
 {
        return 0;
index 00d74b18f7cbd841f88d252d57ca47eb5d768863..d5ad17dfc24ddc1eafbce0c212a607c5598cbc1c 100644 (file)
@@ -426,18 +426,21 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        c_node->start_work = ipp_create_cmd_work();
        if (IS_ERR(c_node->start_work)) {
                DRM_ERROR("failed to create start work.\n");
+               ret = PTR_ERR(c_node->start_work);
                goto err_remove_id;
        }
 
        c_node->stop_work = ipp_create_cmd_work();
        if (IS_ERR(c_node->stop_work)) {
                DRM_ERROR("failed to create stop work.\n");
+               ret = PTR_ERR(c_node->stop_work);
                goto err_free_start;
        }
 
        c_node->event_work = ipp_create_event_work();
        if (IS_ERR(c_node->event_work)) {
                DRM_ERROR("failed to create event work.\n");
+               ret = PTR_ERR(c_node->event_work);
                goto err_free_stop;
        }
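
The three hunks above fix error paths that jumped to cleanup while ret still held a stale value; with the kernel's ERR_PTR convention, the encoded errno has to be extracted before the goto. The general shape (names are illustrative):

work = create_cmd_work();        /* returns ERR_PTR(-errno) on failure */
if (IS_ERR(work)) {
        ret = PTR_ERR(work);     /* propagate the real error code */
        goto err_cleanup;
}
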
 
index 50faf913e5749152f5d2a4dc29c655f33ce27f98..45899fb63272723003837d1e6f338a7b2afa3d87 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/component.h>
 
 #include <drm/exynos_drm.h>
 
@@ -28,7 +29,6 @@
 /* vidi has totally three virtual windows. */
 #define WINDOWS_NR             3
 
-#define get_vidi_mgr(dev)      platform_get_drvdata(to_platform_device(dev))
 #define ctx_from_connector(c)  container_of(c, struct vidi_context, \
                                        connector)
 
@@ -47,11 +47,13 @@ struct vidi_win_data {
 };
 
 struct vidi_context {
+       struct exynos_drm_manager       manager;
+       struct exynos_drm_display       display;
+       struct platform_device          *pdev;
        struct drm_device               *drm_dev;
        struct drm_crtc                 *crtc;
        struct drm_encoder              *encoder;
        struct drm_connector            connector;
-       struct exynos_drm_subdrv        subdrv;
        struct vidi_win_data            win_data[WINDOWS_NR];
        struct edid                     *raw_edid;
        unsigned int                    clkdiv;
@@ -66,6 +68,16 @@ struct vidi_context {
        int                             pipe;
 };
 
+static inline struct vidi_context *manager_to_vidi(struct exynos_drm_manager *m)
+{
+       return container_of(m, struct vidi_context, manager);
+}
+
+static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+{
+       return container_of(d, struct vidi_context, display);
+}
+
 static const char fake_edid_info[] = {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
        0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
@@ -93,7 +105,7 @@ static const char fake_edid_info[] = {
 
 static void vidi_apply(struct exynos_drm_manager *mgr)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
        struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
        struct vidi_win_data *win_data;
        int i;
@@ -110,7 +122,7 @@ static void vidi_apply(struct exynos_drm_manager *mgr)
 
 static void vidi_commit(struct exynos_drm_manager *mgr)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
 
        if (ctx->suspended)
                return;
@@ -118,7 +130,7 @@ static void vidi_commit(struct exynos_drm_manager *mgr)
 
 static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
 
        if (ctx->suspended)
                return -EPERM;
@@ -140,7 +152,7 @@ static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
 
 static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
 
        if (ctx->suspended)
                return;
@@ -152,7 +164,7 @@ static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
 static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
                        struct exynos_drm_overlay *overlay)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
        struct vidi_win_data *win_data;
        int win;
        unsigned long offset;
@@ -204,7 +216,7 @@ static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
 
 static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
        struct vidi_win_data *win_data;
        int win = zpos;
 
@@ -229,7 +241,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
        struct vidi_win_data *win_data;
        int win = zpos;
 
@@ -247,7 +259,7 @@ static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
 
 static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -271,7 +283,7 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
 
 static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
 
        DRM_DEBUG_KMS("%d\n", mode);
 
@@ -297,7 +309,7 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
 static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
                        struct drm_device *drm_dev)
 {
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = manager_to_vidi(mgr);
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
        mgr->drm_dev = ctx->drm_dev = drm_dev;
@@ -316,11 +328,6 @@ static struct exynos_drm_manager_ops vidi_manager_ops = {
        .win_disable = vidi_win_disable,
 };
 
-static struct exynos_drm_manager vidi_manager = {
-       .type = EXYNOS_DISPLAY_TYPE_VIDI,
-       .ops = &vidi_manager_ops,
-};
-
 static void vidi_fake_vblank_handler(struct work_struct *work)
 {
        struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -349,9 +356,8 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 static int vidi_show_connection(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
+       struct vidi_context *ctx = dev_get_drvdata(dev);
        int rc;
-       struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
-       struct vidi_context *ctx = mgr->ctx;
 
        mutex_lock(&ctx->lock);
 
@@ -366,8 +372,7 @@ static int vidi_store_connection(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
 {
-       struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = dev_get_drvdata(dev);
        int ret;
 
        ret = kstrtoint(buf, 0, &ctx->connected);
@@ -420,7 +425,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
                display = exynos_drm_get_display(encoder);
 
                if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
-                       ctx = display->ctx;
+                       ctx = display_to_vidi(display);
                        break;
                }
        }
@@ -530,7 +535,7 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
 static int vidi_create_connector(struct exynos_drm_display *display,
                                struct drm_encoder *encoder)
 {
-       struct vidi_context *ctx = display->ctx;
+       struct vidi_context *ctx = display_to_vidi(display);
        struct drm_connector *connector = &ctx->connector;
        int ret;
 
@@ -556,27 +561,22 @@ static struct exynos_drm_display_ops vidi_display_ops = {
        .create_connector = vidi_create_connector,
 };
 
-static struct exynos_drm_display vidi_display = {
-       .type = EXYNOS_DISPLAY_TYPE_VIDI,
-       .ops = &vidi_display_ops,
-};
-
-static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
-       struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = dev_get_drvdata(dev);
+       struct drm_device *drm_dev = data;
        struct drm_crtc *crtc = ctx->crtc;
        int ret;
 
-       vidi_mgr_initialize(mgr, drm_dev);
+       vidi_mgr_initialize(&ctx->manager, drm_dev);
 
-       ret = exynos_drm_crtc_create(&vidi_manager);
+       ret = exynos_drm_crtc_create(&ctx->manager);
        if (ret) {
                DRM_ERROR("failed to create crtc.\n");
                return ret;
        }
 
-       ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
+       ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
        if (ret) {
                crtc->funcs->destroy(crtc);
                DRM_ERROR("failed to create encoder and connector.\n");
@@ -586,9 +586,18 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
        return 0;
 }
 
+static void vidi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops vidi_component_ops = {
+       .bind   = vidi_bind,
+       .unbind = vidi_unbind,
+};
+
 static int vidi_probe(struct platform_device *pdev)
 {
-       struct exynos_drm_subdrv *subdrv;
        struct vidi_context *ctx;
        int ret;
 
@@ -596,40 +605,54 @@ static int vidi_probe(struct platform_device *pdev)
        if (!ctx)
                return -ENOMEM;
 
+       ctx->manager.type = EXYNOS_DISPLAY_TYPE_VIDI;
+       ctx->manager.ops = &vidi_manager_ops;
+       ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
+       ctx->display.ops = &vidi_display_ops;
        ctx->default_win = 0;
+       ctx->pdev = pdev;
 
-       INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
-
-       vidi_manager.ctx = ctx;
-       vidi_display.ctx = ctx;
+       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+                                       ctx->manager.type);
+       if (ret)
+               return ret;
 
-       mutex_init(&ctx->lock);
+       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+                                       ctx->display.type);
+       if (ret)
+               goto err_del_crtc_component;
 
-       platform_set_drvdata(pdev, &vidi_manager);
+       INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
 
-       subdrv = &ctx->subdrv;
-       subdrv->dev = &pdev->dev;
-       subdrv->probe = vidi_subdrv_probe;
+       mutex_init(&ctx->lock);
 
-       ret = exynos_drm_subdrv_register(subdrv);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to register drm vidi device\n");
-               return ret;
-       }
+       platform_set_drvdata(pdev, ctx);
 
        ret = device_create_file(&pdev->dev, &dev_attr_connection);
        if (ret < 0) {
-               exynos_drm_subdrv_unregister(subdrv);
-               DRM_INFO("failed to create connection sysfs.\n");
+               DRM_ERROR("failed to create connection sysfs.\n");
+               goto err_del_conn_component;
        }
 
-       return 0;
+       ret = component_add(&pdev->dev, &vidi_component_ops);
+       if (ret)
+               goto err_remove_file;
+
+       return ret;
+
+err_remove_file:
+       device_remove_file(&pdev->dev, &dev_attr_connection);
+err_del_conn_component:
+       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+err_del_crtc_component:
+       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+       return ret;
 }
 
 static int vidi_remove(struct platform_device *pdev)
 {
-       struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
-       struct vidi_context *ctx = mgr->ctx;
+       struct vidi_context *ctx = platform_get_drvdata(pdev);
 
        if (ctx->raw_edid != (struct edid *)fake_edid_info) {
                kfree(ctx->raw_edid);
@@ -638,6 +661,10 @@ static int vidi_remove(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       component_del(&pdev->dev, &vidi_component_ops);
+       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+       exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
        return 0;
 }
 
@@ -668,12 +695,19 @@ int exynos_drm_probe_vidi(void)
        return ret;
 }
 
+static int exynos_drm_remove_vidi_device(struct device *dev, void *data)
+{
+       platform_device_unregister(to_platform_device(dev));
+
+       return 0;
+}
+
 void exynos_drm_remove_vidi(void)
 {
-       struct vidi_context *ctx = vidi_manager.ctx;
-       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-       struct platform_device *pdev = to_platform_device(subdrv->dev);
+       int ret = driver_for_each_device(&vidi_driver.driver, NULL, NULL,
+                                        exynos_drm_remove_vidi_device);
+       /* silence compiler warning */
+       (void)ret;
 
        platform_driver_unregister(&vidi_driver);
-       platform_device_unregister(pdev);
 }
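
A note on the conversion pattern used throughout this series: manager_to_vidi() and display_to_vidi() above are container_of() helpers. The vidi_context now embeds its exynos_drm_manager and exynos_drm_display by value, so the enclosing context can be recovered from a pointer to either member and the old mgr->ctx/display->ctx back-pointers (and the file-scope vidi_manager/vidi_display singletons) go away. A minimal self-contained sketch of the idea, with illustrative demo_* names that are not from this patch:

        #include <stddef.h>

        /* same pointer arithmetic the kernel macro performs */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct manager {
                int type;
        };

        struct demo_context {
                struct manager manager;   /* embedded by value, not a pointer */
                int private_state;
        };

        /* recover the enclosing context from a pointer to the embedded member */
        static inline struct demo_context *mgr_to_demo(struct manager *mgr)
        {
                return container_of(mgr, struct demo_context, manager);
        }
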
index 563a19e62eb2cb5ccd9e13a7f35bc589c9824431..5765a161abdd4b35c958086ad9fdf996b146dbc1 100644 (file)
@@ -49,7 +49,6 @@
 #include <linux/gpio.h>
 #include <media/s5p_hdmi.h>
 
-#define get_hdmi_display(dev)  platform_get_drvdata(to_platform_device(dev))
 #define ctx_from_connector(c)  container_of(c, struct hdmi_context, connector)
 
 #define HOTPLUG_DEBOUNCE_MS            1100
@@ -182,6 +181,7 @@ struct hdmi_conf_regs {
 };
 
 struct hdmi_context {
+       struct exynos_drm_display       display;
        struct device                   *dev;
        struct drm_device               *drm_dev;
        struct drm_connector            connector;
@@ -213,6 +213,11 @@ struct hdmi_context {
        enum hdmi_type                  type;
 };
 
+static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+{
+       return container_of(d, struct hdmi_context, display);
+}
+
 struct hdmiphy_config {
        int pixel_clock;
        u8 conf[32];
@@ -1123,7 +1128,7 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
 static int hdmi_create_connector(struct exynos_drm_display *display,
                        struct drm_encoder *encoder)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
        struct drm_connector *connector = &hdata->connector;
        int ret;
 
@@ -2000,7 +2005,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
 static void hdmi_mode_set(struct exynos_drm_display *display,
                        struct drm_display_mode *mode)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
        struct drm_display_mode *m = mode;
 
        DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -2019,7 +2024,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
 
 static void hdmi_commit(struct exynos_drm_display *display)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
 
        mutex_lock(&hdata->hdmi_mutex);
        if (!hdata->powered) {
@@ -2033,7 +2038,7 @@ static void hdmi_commit(struct exynos_drm_display *display)
 
 static void hdmi_poweron(struct exynos_drm_display *display)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
        struct hdmi_resources *res = &hdata->res;
 
        mutex_lock(&hdata->hdmi_mutex);
@@ -2064,7 +2069,7 @@ static void hdmi_poweron(struct exynos_drm_display *display)
 
 static void hdmi_poweroff(struct exynos_drm_display *display)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
        struct hdmi_resources *res = &hdata->res;
 
        mutex_lock(&hdata->hdmi_mutex);
@@ -2099,7 +2104,7 @@ out:
 
 static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
-       struct hdmi_context *hdata = display->ctx;
+       struct hdmi_context *hdata = display_to_hdmi(display);
        struct drm_encoder *encoder = hdata->encoder;
        struct drm_crtc *crtc = encoder->crtc;
        struct drm_crtc_helper_funcs *funcs = NULL;
@@ -2143,11 +2148,6 @@ static struct exynos_drm_display_ops hdmi_display_ops = {
        .commit         = hdmi_commit,
 };
 
-static struct exynos_drm_display hdmi_display = {
-       .type = EXYNOS_DISPLAY_TYPE_HDMI,
-       .ops = &hdmi_display_ops,
-};
-
 static void hdmi_hotplug_work_func(struct work_struct *work)
 {
        struct hdmi_context *hdata;
@@ -2302,12 +2302,11 @@ MODULE_DEVICE_TABLE (of, hdmi_match_types);
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm_dev = data;
-       struct hdmi_context *hdata;
+       struct hdmi_context *hdata = dev_get_drvdata(dev);
 
-       hdata = hdmi_display.ctx;
        hdata->drm_dev = drm_dev;
 
-       return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+       return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2349,31 +2348,28 @@ static int hdmi_probe(struct platform_device *pdev)
        struct resource *res;
        int ret;
 
-       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
-                                       hdmi_display.type);
-       if (ret)
-               return ret;
-
-       if (!dev->of_node) {
-               ret = -ENODEV;
-               goto err_del_component;
-       }
+       if (!dev->of_node)
+               return -ENODEV;
 
        pdata = drm_hdmi_dt_parse_pdata(dev);
-       if (!pdata) {
-               ret = -EINVAL;
-               goto err_del_component;
-       }
+       if (!pdata)
+               return -EINVAL;
 
        hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
-       if (!hdata) {
-               ret = -ENOMEM;
-               goto err_del_component;
-       }
+       if (!hdata)
+               return -ENOMEM;
+
+       hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
+       hdata->display.ops = &hdmi_display_ops;
+
+       ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+                                       hdata->display.type);
+       if (ret)
+               return ret;
 
        mutex_init(&hdata->hdmi_mutex);
 
-       platform_set_drvdata(pdev, &hdmi_display);
+       platform_set_drvdata(pdev, hdata);
 
        match = of_match_node(hdmi_match_types, dev->of_node);
        if (!match) {
@@ -2485,7 +2481,6 @@ out_get_phy_port:
        }
 
        pm_runtime_enable(dev);
-       hdmi_display.ctx = hdata;
 
        ret = component_add(&pdev->dev, &hdmi_component_ops);
        if (ret)
@@ -2510,7 +2505,7 @@ err_del_component:
 
 static int hdmi_remove(struct platform_device *pdev)
 {
-       struct hdmi_context *hdata = hdmi_display.ctx;
+       struct hdmi_context *hdata = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&hdata->hotplug_work);
 
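The same driver-data pattern recurs in the HDMI conversion above: probe() stores the context with platform_set_drvdata(), and bind()/remove() fetch it back with dev_get_drvdata()/platform_get_drvdata() instead of reaching for the old hdmi_display singleton. A minimal sketch of the pattern (demo_* names are illustrative, not from this patch):

        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct demo_ctx {
                int state;
        };

        static int demo_probe(struct platform_device *pdev)
        {
                struct demo_ctx *ctx;

                ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        return -ENOMEM;

                platform_set_drvdata(pdev, ctx);  /* per-device state, no globals */
                return 0;
        }

        static int demo_remove(struct platform_device *pdev)
        {
                struct demo_ctx *ctx = platform_get_drvdata(pdev);

                (void)ctx;  /* devm-managed allocation, nothing to free here */
                return 0;
        }
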
index a41c84ee3a2d554a464219cf4000083b2e3fa4dd..820b76234ef4c5c6da02ebe50327066c5fe38d1a 100644 (file)
@@ -40,8 +40,6 @@
 #include "exynos_drm_iommu.h"
 #include "exynos_mixer.h"
 
-#define get_mixer_manager(dev) platform_get_drvdata(to_platform_device(dev))
-
 #define MIXER_WIN_NR           3
 #define MIXER_DEFAULT_WIN      0
 
@@ -86,6 +84,7 @@ enum mixer_version_id {
 };
 
 struct mixer_context {
+       struct exynos_drm_manager manager;
        struct platform_device *pdev;
        struct device           *dev;
        struct drm_device       *drm_dev;
@@ -104,6 +103,11 @@ struct mixer_context {
        atomic_t                wait_vsync_event;
 };
 
+static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
+{
+       return container_of(mgr, struct mixer_context, manager);
+}
+
 struct mixer_drv_data {
        enum mixer_version_id   version;
        bool                                    is_vp_enabled;
@@ -854,7 +858,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
                        struct drm_device *drm_dev)
 {
        int ret;
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        struct exynos_drm_private *priv;
        priv = drm_dev->dev_private;
 
@@ -885,7 +889,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
 
 static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 
        if (is_drm_iommu_supported(mixer_ctx->drm_dev))
                drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
@@ -893,7 +897,7 @@ static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
 
 static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
        if (!mixer_ctx->powered) {
@@ -910,7 +914,7 @@ static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
 
 static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
        /* disable vsync interrupt */
@@ -920,7 +924,7 @@ static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
 static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
                        struct exynos_drm_overlay *overlay)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        struct hdmi_win_data *win_data;
        int win;
 
@@ -971,7 +975,7 @@ static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
 
 static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
 
        DRM_DEBUG_KMS("win: %d\n", win);
@@ -993,7 +997,7 @@ static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
        struct mixer_resources *res = &mixer_ctx->mixer_res;
        int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
        unsigned long flags;
@@ -1021,7 +1025,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 
 static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *mixer_ctx = mgr->ctx;
+       struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 
        mutex_lock(&mixer_ctx->mixer_mutex);
        if (!mixer_ctx->powered) {
@@ -1048,7 +1052,7 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 
 static void mixer_window_suspend(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *ctx = mgr->ctx;
+       struct mixer_context *ctx = mgr_to_mixer(mgr);
        struct hdmi_win_data *win_data;
        int i;
 
@@ -1062,7 +1066,7 @@ static void mixer_window_suspend(struct exynos_drm_manager *mgr)
 
 static void mixer_window_resume(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *ctx = mgr->ctx;
+       struct mixer_context *ctx = mgr_to_mixer(mgr);
        struct hdmi_win_data *win_data;
        int i;
 
@@ -1077,7 +1081,7 @@ static void mixer_window_resume(struct exynos_drm_manager *mgr)
 
 static void mixer_poweron(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *ctx = mgr->ctx;
+       struct mixer_context *ctx = mgr_to_mixer(mgr);
        struct mixer_resources *res = &ctx->mixer_res;
 
        mutex_lock(&ctx->mixer_mutex);
@@ -1111,7 +1115,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
 
 static void mixer_poweroff(struct exynos_drm_manager *mgr)
 {
-       struct mixer_context *ctx = mgr->ctx;
+       struct mixer_context *ctx = mgr_to_mixer(mgr);
        struct mixer_resources *res = &ctx->mixer_res;
 
        mutex_lock(&ctx->mixer_mutex);
@@ -1187,11 +1191,6 @@ static struct exynos_drm_manager_ops mixer_manager_ops = {
        .win_disable            = mixer_win_disable,
 };
 
-static struct exynos_drm_manager mixer_manager = {
-       .type                   = EXYNOS_DISPLAY_TYPE_HDMI,
-       .ops                    = &mixer_manager_ops,
-};
-
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
        .version = MXR_VER_128_0_0_184,
        .is_vp_enabled = 0,
@@ -1249,48 +1248,17 @@ MODULE_DEVICE_TABLE(of, mixer_match_types);
 
 static int mixer_bind(struct device *dev, struct device *manager, void *data)
 {
-       struct platform_device *pdev = to_platform_device(dev);
+       struct mixer_context *ctx = dev_get_drvdata(dev);
        struct drm_device *drm_dev = data;
-       struct mixer_context *ctx;
-       struct mixer_drv_data *drv;
        int ret;
 
-       dev_info(dev, "probe start\n");
-
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
-       if (!ctx) {
-               DRM_ERROR("failed to alloc mixer context.\n");
-               return -ENOMEM;
-       }
-
-       mutex_init(&ctx->mixer_mutex);
-
-       if (dev->of_node) {
-               const struct of_device_id *match;
-               match = of_match_node(mixer_match_types, dev->of_node);
-               drv = (struct mixer_drv_data *)match->data;
-       } else {
-               drv = (struct mixer_drv_data *)
-                       platform_get_device_id(pdev)->driver_data;
-       }
-
-       ctx->pdev = pdev;
-       ctx->dev = dev;
-       ctx->vp_enabled = drv->is_vp_enabled;
-       ctx->has_sclk = drv->has_sclk;
-       ctx->mxr_ver = drv->version;
-       init_waitqueue_head(&ctx->wait_vsync_queue);
-       atomic_set(&ctx->wait_vsync_event, 0);
-
-       mixer_manager.ctx = ctx;
-       ret = mixer_initialize(&mixer_manager, drm_dev);
+       ret = mixer_initialize(&ctx->manager, drm_dev);
        if (ret)
                return ret;
 
-       platform_set_drvdata(pdev, &mixer_manager);
-       ret = exynos_drm_crtc_create(&mixer_manager);
+       ret = exynos_drm_crtc_create(&ctx->manager);
        if (ret) {
-               mixer_mgr_remove(&mixer_manager);
+               mixer_mgr_remove(&ctx->manager);
                return ret;
        }
 
@@ -1301,11 +1269,9 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
 
 static void mixer_unbind(struct device *dev, struct device *master, void *data)
 {
-       struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+       struct mixer_context *ctx = dev_get_drvdata(dev);
 
-       dev_info(dev, "remove successful\n");
-
-       mixer_mgr_remove(mgr);
+       mixer_mgr_remove(&ctx->manager);
 
        pm_runtime_disable(dev);
 }
@@ -1317,22 +1283,62 @@ static const struct component_ops mixer_component_ops = {
 
 static int mixer_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
+       struct mixer_drv_data *drv;
+       struct mixer_context *ctx;
        int ret;
 
+       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               DRM_ERROR("failed to alloc mixer context.\n");
+               return -ENOMEM;
+       }
+
+       mutex_init(&ctx->mixer_mutex);
+
+       ctx->manager.type = EXYNOS_DISPLAY_TYPE_HDMI;
+       ctx->manager.ops = &mixer_manager_ops;
+
+       if (dev->of_node) {
+               const struct of_device_id *match;
+
+               match = of_match_node(mixer_match_types, dev->of_node);
+               drv = (struct mixer_drv_data *)match->data;
+       } else {
+               drv = (struct mixer_drv_data *)
+                       platform_get_device_id(pdev)->driver_data;
+       }
+
+       ctx->pdev = pdev;
+       ctx->dev = dev;
+       ctx->vp_enabled = drv->is_vp_enabled;
+       ctx->has_sclk = drv->has_sclk;
+       ctx->mxr_ver = drv->version;
+       init_waitqueue_head(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
+
+       platform_set_drvdata(pdev, ctx);
+
        ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
-                                       mixer_manager.type);
+                                       ctx->manager.type);
        if (ret)
                return ret;
 
        ret = component_add(&pdev->dev, &mixer_component_ops);
-       if (ret)
+       if (ret) {
                exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+               return ret;
+       }
+
+       pm_runtime_enable(dev);
 
        return ret;
 }
 
 static int mixer_remove(struct platform_device *pdev)
 {
+       pm_runtime_disable(&pdev->dev);
+
        component_del(&pdev->dev, &mixer_component_ops);
        exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
 
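The mixer now follows the component-framework split used across this series: probe() only allocates state and registers the device as a component, while all DRM-facing setup waits for bind(), which the master device (here the top-level exynos DRM driver) triggers once every registered component is present. A minimal sketch of the pattern, using the real <linux/component.h> API but hypothetical demo_* names:

        #include <linux/component.h>
        #include <linux/platform_device.h>

        /* bind() runs only once the master has gathered all components;
         * for exynos, 'data' is the struct drm_device being brought up. */
        static int demo_bind(struct device *dev, struct device *master, void *data)
        {
                return 0;
        }

        static void demo_unbind(struct device *dev, struct device *master, void *data)
        {
        }

        static const struct component_ops demo_component_ops = {
                .bind   = demo_bind,
                .unbind = demo_unbind,
        };

        static int demo_probe(struct platform_device *pdev)
        {
                /* probe() only publishes the component; DRM setup is deferred */
                return component_add(&pdev->dev, &demo_component_ops);
        }

        static int demo_remove(struct platform_device *pdev)
        {
                component_del(&pdev->dev, &demo_component_ops);
                return 0;
        }
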
index b1531557637662af7c0e3bc576602514f3701d63..190e55f2f891046420942b6bb5d8423e25753254 100644 (file)
@@ -39,6 +39,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
          oaktrail_crtc.o \
          oaktrail_lvds.o \
+         oaktrail_lvds_i2c.o \
          oaktrail_hdmi.o \
          oaktrail_hdmi_i2c.o
 
index 9f158eab517a46c0f40ae8a8b37066e3a1e031cd..0fafb8e2483aa074ebd5ced75024c53ba435a5f7 100644 (file)
 #include "gma_display.h"
 #include <drm/drm_dp_helper.h>
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ *                              aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing
+ *          or whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+       bool running;
+       u16 address;
+       int (*aux_ch) (struct i2c_adapter *adapter,
+                      int mode, uint8_t write_byte,
+                      uint8_t *read_byte);
+};
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+                           uint8_t write_byte, uint8_t *read_byte)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+
+       ret = (*algo_data->aux_ch)(adapter, mode,
+                                  write_byte, read_byte);
+       return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int mode = MODE_I2C_START;
+       int ret;
+
+       if (reading)
+               mode |= MODE_I2C_READ;
+       else
+               mode |= MODE_I2C_WRITE;
+       algo_data->address = address;
+       algo_data->running = true;
+       ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+       return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int mode = MODE_I2C_STOP;
+
+       if (reading)
+               mode |= MODE_I2C_READ;
+       else
+               mode |= MODE_I2C_WRITE;
+       if (algo_data->running) {
+               (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+               algo_data->running = false;
+       }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+
+       if (!algo_data->running)
+               return -EIO;
+
+       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+       return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+       struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+       int ret;
+
+       if (!algo_data->running)
+               return -EIO;
+
+       ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+       return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+                    struct i2c_msg *msgs,
+                    int num)
+{
+       int ret = 0;
+       bool reading = false;
+       int m;
+       int b;
+
+       for (m = 0; m < num; m++) {
+               u16 len = msgs[m].len;
+               u8 *buf = msgs[m].buf;
+               reading = (msgs[m].flags & I2C_M_RD) != 0;
+               ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+               if (ret < 0)
+                       break;
+               if (reading) {
+                       for (b = 0; b < len; b++) {
+                               ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+                               if (ret < 0)
+                                       break;
+                       }
+               } else {
+                       for (b = 0; b < len; b++) {
+                               ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+                               if (ret < 0)
+                                       break;
+                       }
+               }
+               if (ret < 0)
+                       break;
+       }
+       if (ret >= 0)
+               ret = num;
+       i2c_algo_dp_aux_stop(adapter, reading);
+       DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+       return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+              I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+              I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+              I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+       .master_xfer    = i2c_algo_dp_aux_xfer,
+       .functionality  = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+       (void) i2c_algo_dp_aux_address(adapter, 0, false);
+       (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+       adapter->algo = &i2c_dp_aux_algo;
+       adapter->retries = 3;
+       i2c_dp_aux_reset_bus(adapter);
+       return 0;
+}
+
+/*
+ * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
+ * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
+ */
+static int __deprecated
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+       int error;
+
+       error = i2c_dp_aux_prepare_bus(adapter);
+       if (error)
+               return error;
+       error = i2c_add_adapter(adapter);
+       return error;
+}
+
 #define _wait_for(COND, MS, W) ({ \
         unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
         int ret__ = 0;                                                  \
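
The helper block copied into gma500 above emulates an I2C bus on top of DisplayPort AUX transactions: an address packet opens the transfer, single bytes move under MODE_I2C_READ/MODE_I2C_WRITE, and a stop packet with the MOT bit cleared closes the link. Within this same file, a caller would wire the adapter up roughly as follows; this is a sketch under the assumption of a hardware-specific aux_ch callback, and the demo_* names are hypothetical:

        /* hardware-specific single-byte AUX transaction (hypothetical) */
        static int demo_aux_ch(struct i2c_adapter *adapter, int mode,
                               uint8_t write_byte, uint8_t *read_byte)
        {
                /* run one AUX cycle on the real hardware here */
                return 0;
        }

        static struct i2c_algo_dp_aux_data demo_algo_data = {
                .running = false,
                .aux_ch  = demo_aux_ch,
        };

        static struct i2c_adapter demo_adapter;

        static int demo_register(void)
        {
                demo_adapter.algo_data = &demo_algo_data;
                /* i2c_dp_aux_add_bus() installs i2c_dp_aux_algo, sets the
                 * retry count and resets the bus before registering. */
                return i2c_dp_aux_add_bus(&demo_adapter);
        }
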
index 87885d8c06e849290be8dfb543dc0caaa88cac4d..6b43ae3ffd73e067f7b52a51cdde68fbab3f259a 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <linux/freezer.h>
+#include <video/mipi_display.h>
 
 #include "mdfld_dsi_output.h"
 #include "mdfld_dsi_pkg_sender.h"
 
 #define MDFLD_DSI_READ_MAX_COUNT               5000
 
-enum data_type {
-       DSI_DT_GENERIC_SHORT_WRITE_0    = 0x03,
-       DSI_DT_GENERIC_SHORT_WRITE_1    = 0x13,
-       DSI_DT_GENERIC_SHORT_WRITE_2    = 0x23,
-       DSI_DT_GENERIC_READ_0           = 0x04,
-       DSI_DT_GENERIC_READ_1           = 0x14,
-       DSI_DT_GENERIC_READ_2           = 0x24,
-       DSI_DT_GENERIC_LONG_WRITE       = 0x29,
-       DSI_DT_DCS_SHORT_WRITE_0        = 0x05,
-       DSI_DT_DCS_SHORT_WRITE_1        = 0x15,
-       DSI_DT_DCS_READ                 = 0x06,
-       DSI_DT_DCS_LONG_WRITE           = 0x39,
-};
-
 enum {
        MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
 };
@@ -321,9 +308,9 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
        u8 cmd;
 
        switch (data_type) {
-       case DSI_DT_DCS_SHORT_WRITE_0:
-       case DSI_DT_DCS_SHORT_WRITE_1:
-       case DSI_DT_DCS_LONG_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+       case MIPI_DSI_DCS_LONG_WRITE:
                cmd = *data;
                break;
        default:
@@ -334,12 +321,12 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
        sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
 
        /*wait for 120 milliseconds in case exit_sleep_mode just be sent*/
-       if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+       if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
                /*TODO: replace it with msleep later*/
                mdelay(120);
        }
 
-       if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+       if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
                /*TODO: replace it with msleep later*/
                mdelay(120);
        }
@@ -352,9 +339,9 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
        u8 cmd;
 
        switch (data_type) {
-       case DSI_DT_DCS_SHORT_WRITE_0:
-       case DSI_DT_DCS_SHORT_WRITE_1:
-       case DSI_DT_DCS_LONG_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+       case MIPI_DSI_DCS_LONG_WRITE:
                cmd = *data;
                break;
        default:
@@ -362,15 +349,15 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
        }
 
        /*update panel status*/
-       if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+       if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
                sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
                /*TODO: replace it with msleep later*/
                mdelay(120);
-       } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+       } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
                sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
                /*TODO: replace it with msleep later*/
                mdelay(120);
-       } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+       } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
                /*TODO: replace it with msleep later*/
                mdelay(5);
        }
@@ -405,19 +392,19 @@ static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
        }
 
        switch (data_type) {
-       case DSI_DT_GENERIC_SHORT_WRITE_0:
-       case DSI_DT_GENERIC_SHORT_WRITE_1:
-       case DSI_DT_GENERIC_SHORT_WRITE_2:
-       case DSI_DT_GENERIC_READ_0:
-       case DSI_DT_GENERIC_READ_1:
-       case DSI_DT_GENERIC_READ_2:
-       case DSI_DT_DCS_SHORT_WRITE_0:
-       case DSI_DT_DCS_SHORT_WRITE_1:
-       case DSI_DT_DCS_READ:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+       case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+       case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+       case MIPI_DSI_DCS_SHORT_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+       case MIPI_DSI_DCS_READ:
                ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
                break;
-       case DSI_DT_GENERIC_LONG_WRITE:
-       case DSI_DT_DCS_LONG_WRITE:
+       case MIPI_DSI_GENERIC_LONG_WRITE:
+       case MIPI_DSI_DCS_LONG_WRITE:
                ret = send_long_pkg(sender, data_type, data, len, hs);
                break;
        }
@@ -440,7 +427,7 @@ int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
        }
 
        spin_lock_irqsave(&sender->lock, flags);
-       send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+       send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
        spin_unlock_irqrestore(&sender->lock, flags);
 
        return 0;
@@ -461,10 +448,10 @@ int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
        data[0] = cmd;
 
        if (param_num) {
-               data_type = DSI_DT_DCS_SHORT_WRITE_1;
+               data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
                data[1] = param;
        } else {
-               data_type = DSI_DT_DCS_SHORT_WRITE_0;
+               data_type = MIPI_DSI_DCS_SHORT_WRITE;
                data[1] = 0;
        }
 
@@ -489,17 +476,17 @@ int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
 
        switch (param_num) {
        case 0:
-               data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+               data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
                data[0] = 0;
                data[1] = 0;
                break;
        case 1:
-               data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+               data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
                data[0] = param0;
                data[1] = 0;
                break;
        case 2:
-               data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+               data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
                data[0] = param0;
                data[1] = param1;
                break;
@@ -523,7 +510,7 @@ int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
        }
 
        spin_lock_irqsave(&sender->lock, flags);
-       send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+       send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
        spin_unlock_irqrestore(&sender->lock, flags);
 
        return 0;
@@ -594,7 +581,7 @@ int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
                return -EINVAL;
        }
 
-       return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+       return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
                                data, len, hs);
 }
 
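The conversion above retires the driver-private data_type enum in favour of the shared constants from <video/mipi_display.h>; the values are identical (MIPI_DSI_DCS_LONG_WRITE is 0x39, matching the old DSI_DT_DCS_LONG_WRITE, and so on), so the packet sender now speaks the same vocabulary as the rest of the kernel. Using the patched sender, a panel power-on sequence would look roughly like this sketch, assuming an already-initialised sender and the declarations from mdfld_dsi_pkg_sender.h:

        #include <video/mipi_display.h>

        static void demo_panel_on(struct mdfld_dsi_pkg_sender *sender)
        {
                /* DCS short writes with no parameter, sent in high-speed mode */
                mdfld_dsi_send_mcs_short(sender, MIPI_DCS_EXIT_SLEEP_MODE, 0, 0, true);
                mdfld_dsi_send_mcs_short(sender, MIPI_DCS_SET_DISPLAY_ON, 0, 0, true);
        }
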
index 459cd7ea8b81a098e9cf9ac8596d6105c7a7df44..0478a21c15d52d1d0eae8604183c2bfd39b7323e 100644 (file)
@@ -62,18 +62,6 @@ struct mdfld_dsi_pkg_sender {
        u32 mipi_cmd_len_reg;
 };
 
-/* DCS definitions */
-#define DCS_SOFT_RESET                 0x01
-#define DCS_ENTER_SLEEP_MODE           0x10
-#define DCS_EXIT_SLEEP_MODE            0x11
-#define DCS_SET_DISPLAY_OFF            0x28
-#define DCS_SET_DISPLAY_ON             0x29
-#define DCS_SET_COLUMN_ADDRESS         0x2a
-#define DCS_SET_PAGE_ADDRESS           0x2b
-#define DCS_WRITE_MEM_START            0x2c
-#define DCS_SET_TEAR_OFF               0x34
-#define DCS_SET_TEAR_ON                        0x35
-
 extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
                                        int pipe);
 extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
index 0d39da6e8b7a0627d7923d70b795a3f31db69e04..83bbc271bcfb5e92e1321a5a5842ea1fab26830a 100644 (file)
@@ -359,22 +359,26 @@ void oaktrail_lvds_init(struct drm_device *dev,
         *    if closed, act like it's not there for now
         */
 
+       edid = NULL;
        mutex_lock(&dev->mode_config.mutex);
        i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
-       if (i2c_adap == NULL)
-               dev_err(dev->dev, "No ddc adapter available!\n");
+       if (i2c_adap)
+               edid = drm_get_edid(connector, i2c_adap);
+       if (edid == NULL && dev_priv->lpc_gpio_base) {
+               oaktrail_lvds_i2c_init(encoder);
+               if (gma_encoder->ddc_bus != NULL) {
+                       i2c_adap = &gma_encoder->ddc_bus->adapter;
+                       edid = drm_get_edid(connector, i2c_adap);
+               }
+       }
        /*
         * Attempt to get the fixed panel mode from DDC.  Assume that the
         * preferred mode is the right one.
         */
-       if (i2c_adap) {
-               edid = drm_get_edid(connector, i2c_adap);
-               if (edid) {
-                       drm_mode_connector_update_edid_property(connector,
-                                                                       edid);
-                       drm_add_edid_modes(connector, edid);
-                       kfree(edid);
-               }
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               drm_add_edid_modes(connector, edid);
+               kfree(edid);
 
                list_for_each_entry(scan, &connector->probed_modes, head) {
                        if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -383,7 +387,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
                                goto out;       /* FIXME: check for quirks */
                        }
                }
-       }
+       } else
+               dev_err(dev->dev, "No ddc adapter available!\n");
        /*
         * If we didn't get EDID, try getting panel timing
         * from configuration data
@@ -411,8 +416,10 @@ failed_find:
        mutex_unlock(&dev->mode_config.mutex);
 
        dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
-       if (gma_encoder->ddc_bus)
+       if (gma_encoder->ddc_bus) {
                psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+               gma_encoder->ddc_bus = NULL;
+       }
 
 /* failed_ddc: */
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
new file mode 100644 (file)
index 0000000..f913a62
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2002-2010, Intel Corporation.
+ * Copyright (c) 2014 ATRON electronic GmbH
+ *   Author: Jan Safrata <jan.nikitenko@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+
+/*
+ * LPC GPIO based I2C bus for LVDS of Atom E6xx
+ */
+
+/*-----------------------------------------------------------------------------
+ * LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are
+ * part of the Atom E6xx [D31:F0]
+ ----------------------------------------------------------------------------*/
+#define RGEN    0x20
+#define RGIO    0x24
+#define RGLVL   0x28
+#define RGTPE   0x2C
+#define RGTNE   0x30
+#define RGGPE   0x34
+#define RGSMI   0x38
+#define RGTS    0x3C
+
+/* The LVDS GPIO clock lines are GPIOSUS[3]
+ * The LVDS GPIO data lines are GPIOSUS[4]
+ */
+#define GPIO_CLOCK     0x08
+#define GPIO_DATA      0x10
+
+#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
+#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))
+
+static int get_clock(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       u32 val, tmp;
+
+       val = LPC_READ_REG(chan, RGIO);
+       val |= GPIO_CLOCK;
+       LPC_WRITE_REG(chan, RGIO, val);
+       tmp = LPC_READ_REG(chan, RGLVL);
+       val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
+
+       return val;
+}
+
+static int get_data(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       u32 val, tmp;
+
+       val = LPC_READ_REG(chan, RGIO);
+       val |= GPIO_DATA;
+       LPC_WRITE_REG(chan, RGIO, val);
+       tmp = LPC_READ_REG(chan, RGLVL);
+       val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
+
+       return val;
+}
+
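+/*
+ * Open-drain emulation (assuming RGIO selects the GPIO direction and
+ * RGLVL its level, as the accessors above suggest): a line is released
+ * high by flipping it to input and letting the pull-up win, and pulled
+ * low by switching it to output with the level bit cleared.
+ */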
+static void set_clock(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       u32 val;
+
+       if (state_high) {
+               val = LPC_READ_REG(chan, RGIO);
+               val |= GPIO_CLOCK;
+               LPC_WRITE_REG(chan, RGIO, val);
+       } else {
+               val = LPC_READ_REG(chan, RGIO);
+               val &= ~GPIO_CLOCK;
+               LPC_WRITE_REG(chan, RGIO, val);
+               val = LPC_READ_REG(chan, RGLVL);
+               val &= ~GPIO_CLOCK;
+               LPC_WRITE_REG(chan, RGLVL, val);
+       }
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       u32 val;
+
+       if (state_high) {
+               val = LPC_READ_REG(chan, RGIO);
+               val |= GPIO_DATA;
+               LPC_WRITE_REG(chan, RGIO, val);
+       } else {
+               val = LPC_READ_REG(chan, RGIO);
+               val &= ~GPIO_DATA;
+               LPC_WRITE_REG(chan, RGIO, val);
+               val = LPC_READ_REG(chan, RGLVL);
+               val &= ~GPIO_DATA;
+               LPC_WRITE_REG(chan, RGLVL, val);
+       }
+}
+
+void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               return;
+
+       chan->drm_dev = dev;
+       chan->reg = dev_priv->lpc_gpio_base;
+       strncpy(chan->adapter.name, "gma500 LPC",  I2C_NAME_SIZE - 1);
+       chan->adapter.owner = THIS_MODULE;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 100;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(50);
+
+       if (i2c_bit_add_bus(&chan->adapter)) {
+               kfree(chan);
+               return;
+       }
+
+       gma_encoder->ddc_bus = chan;
+}
index 6ec3a905fdd274bbcd1ec169efc802e8cf1663be..92e7e5795398e80a6ab46f51d3aad4d22a4e8346 100644 (file)
@@ -212,6 +212,8 @@ static int psb_driver_unload(struct drm_device *dev)
                }
                if (dev_priv->aux_pdev)
                        pci_dev_put(dev_priv->aux_pdev);
+               if (dev_priv->lpc_pdev)
+                       pci_dev_put(dev_priv->lpc_pdev);
 
                /* Destroy VBT data */
                psb_intel_destroy_bios(dev);
@@ -280,6 +282,24 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
                        DRM_DEBUG_KMS("Couldn't find aux pci device");
                }
                dev_priv->gmbus_reg = dev_priv->aux_reg;
+
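+               /*
+                * Inference from the masks used below: the GBA config word
+                * at 0x44 of the D31:F0 LPC bridge holds the GPIO I/O base
+                * in bits 15:6, and bit 31 is written back as an enable bit.
+                */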
+               dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+               if (dev_priv->lpc_pdev) {
+                       pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+                               &dev_priv->lpc_gpio_base);
+                       pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA,
+                               (u32)dev_priv->lpc_gpio_base | (1L<<31));
+                       pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+                               &dev_priv->lpc_gpio_base);
+                       dev_priv->lpc_gpio_base &= 0xffc0;
+                       if (dev_priv->lpc_gpio_base)
+                               DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n",
+                                               dev_priv->lpc_gpio_base);
+                       else {
+                               pci_dev_put(dev_priv->lpc_pdev);
+                               dev_priv->lpc_pdev = NULL;
+                       }
+               }
        } else {
                dev_priv->gmbus_reg = dev_priv->vdc_reg;
        }
index 55ebe2bd88dd75972b10c5cae18ed44cb3a50fd7..e38057b918657de7812e18071e084ae7538db8c8 100644 (file)
@@ -83,6 +83,7 @@ enum {
 #define PSB_PGETBL_CTL          0x2020
 #define _PSB_PGETBL_ENABLED     0x00000001
 #define PSB_SGX_2D_SLAVE_PORT   0x4000
+#define PSB_LPC_GBA             0x44
 
 /* TODO: To get rid of */
 #define PSB_TT_PRIV0_LIMIT      (256*1024*1024)
@@ -441,6 +442,7 @@ struct psb_ops;
 struct drm_psb_private {
        struct drm_device *dev;
        struct pci_dev *aux_pdev; /* Currently only used by mrst */
+       struct pci_dev *lpc_pdev; /* Currently only used by mrst */
        const struct psb_ops *ops;
        const struct psb_offset *regmap;
        
@@ -470,6 +472,7 @@ struct drm_psb_private {
        uint8_t __iomem *sgx_reg;
        uint8_t __iomem *vdc_reg;
        uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
+       uint16_t lpc_gpio_base;
        uint32_t gatt_free_offset;
 
        /* Fencing / irq */
index 87b50ba64ed40aa640e74ec37e90e4008c0875e7..b21a09451d1d506836b18f0ab7fb363ebfe7d924 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/i2c.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
 #include "framebuffer.h"
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
index 336bd3aa1a066868af57f131c3260629abdb27c0..860dd2177ca15269bfedc7cf1ca2b4dafc3bddae 100644 (file)
@@ -223,6 +223,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
 extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
 extern void oaktrail_dsi_init(struct drm_device *dev,
                           struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder);
 extern void mid_dsi_init(struct drm_device *dev,
                    struct psb_intel_mode_device *mode_dev, int dsi_num);
 
index 0be96fdb5e28ba4e63ae51b6eb2b2d9a947028d4..58529cea575d3c9492e8ea267b56e22772bf2490 100644 (file)
@@ -1631,57 +1631,8 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
        return !list_empty(&connector->probed_modes);
 }
 
-static void
-psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
-       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-       struct drm_device *dev = connector->dev;
-
-       if (psb_intel_sdvo_connector->left)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->left);
-       if (psb_intel_sdvo_connector->right)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->right);
-       if (psb_intel_sdvo_connector->top)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->top);
-       if (psb_intel_sdvo_connector->bottom)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
-       if (psb_intel_sdvo_connector->hpos)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
-       if (psb_intel_sdvo_connector->vpos)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
-       if (psb_intel_sdvo_connector->saturation)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
-       if (psb_intel_sdvo_connector->contrast)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
-       if (psb_intel_sdvo_connector->hue)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
-       if (psb_intel_sdvo_connector->sharpness)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
-       if (psb_intel_sdvo_connector->flicker_filter)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
-       if (psb_intel_sdvo_connector->flicker_filter_2d)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
-       if (psb_intel_sdvo_connector->flicker_filter_adaptive)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
-       if (psb_intel_sdvo_connector->tv_luma_filter)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
-       if (psb_intel_sdvo_connector->tv_chroma_filter)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
-       if (psb_intel_sdvo_connector->dot_crawl)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
-       if (psb_intel_sdvo_connector->brightness)
-               drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
-}
-
 static void psb_intel_sdvo_destroy(struct drm_connector *connector)
 {
-       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-
-       if (psb_intel_sdvo_connector->tv_format)
-               drm_property_destroy(connector->dev,
-                                    psb_intel_sdvo_connector->tv_format);
-
-       psb_intel_sdvo_destroy_enhance_property(connector);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
index 4d341db462a2442878991e45a3438d770f1afe64..22c7ed63a001df43190a26ffc070a69c183f729c 100644 (file)
@@ -1,6 +1,12 @@
 menu "I2C encoder or helper chips"
      depends on DRM && DRM_KMS_HELPER && I2C
 
+config DRM_I2C_ADV7511
+       tristate "ADV7511 encoder"
+       select REGMAP_I2C
+       help
+         Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
 config DRM_I2C_CH7006
        tristate "Chrontel ch7006 TV encoder"
        default m if DRM_NOUVEAU
index 43aa33baebed8e7888c07c0f818848cc1adb80f3..2c72eb584ab7c628254ecfec90d5de70773fab4e 100644 (file)
@@ -1,5 +1,7 @@
 ccflags-y := -Iinclude/drm
 
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+
 ch7006-y := ch7006_drv.o ch7006_mode.o
 obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
 
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
new file mode 100644 (file)
index 0000000..faf1c0c
--- /dev/null
@@ -0,0 +1,1010 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "adv7511.h"
+
+struct adv7511 {
+       struct i2c_client *i2c_main;
+       struct i2c_client *i2c_edid;
+
+       struct regmap *regmap;
+       struct regmap *packet_memory_regmap;
+       enum drm_connector_status status;
+       int dpms_mode;
+
+       unsigned int f_tmds;
+
+       unsigned int current_edid_segment;
+       uint8_t edid_buf[256];
+
+       wait_queue_head_t wq;
+       struct drm_encoder *encoder;
+
+       bool embedded_sync;
+       enum adv7511_sync_polarity vsync_polarity;
+       enum adv7511_sync_polarity hsync_polarity;
+       bool rgb;
+
+       struct edid *edid;
+
+       struct gpio_desc *gpio_pd;
+};
+
+static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
+{
+       return to_encoder_slave(encoder)->slave_priv;
+}
+
+/* ADI recommended values for proper operation. */
+static const struct reg_default adv7511_fixed_registers[] = {
+       { 0x98, 0x03 },
+       { 0x9a, 0xe0 },
+       { 0x9c, 0x30 },
+       { 0x9d, 0x61 },
+       { 0xa2, 0xa4 },
+       { 0xa3, 0xa4 },
+       { 0xe0, 0xd0 },
+       { 0xf9, 0x00 },
+       { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+       0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+       0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+       0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+       0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+       0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+       0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+       0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+       0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+       0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+       0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+       0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+       0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+       0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+       0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+       0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case ADV7511_REG_CHIP_REVISION:
+       case ADV7511_REG_SPDIF_FREQ:
+       case ADV7511_REG_CTS_AUTOMATIC1:
+       case ADV7511_REG_CTS_AUTOMATIC2:
+       case ADV7511_REG_VIC_DETECTED:
+       case ADV7511_REG_VIC_SEND:
+       case ADV7511_REG_AUX_VIC_DETECTED:
+       case ADV7511_REG_STATUS:
+       case ADV7511_REG_GC(1):
+       case ADV7511_REG_INT(0):
+       case ADV7511_REG_INT(1):
+       case ADV7511_REG_PLL_STATUS:
+       case ADV7511_REG_AN(0):
+       case ADV7511_REG_AN(1):
+       case ADV7511_REG_AN(2):
+       case ADV7511_REG_AN(3):
+       case ADV7511_REG_AN(4):
+       case ADV7511_REG_AN(5):
+       case ADV7511_REG_AN(6):
+       case ADV7511_REG_AN(7):
+       case ADV7511_REG_HDCP_STATUS:
+       case ADV7511_REG_BCAPS:
+       case ADV7511_REG_BKSV(0):
+       case ADV7511_REG_BKSV(1):
+       case ADV7511_REG_BKSV(2):
+       case ADV7511_REG_BKSV(3):
+       case ADV7511_REG_BKSV(4):
+       case ADV7511_REG_DDC_STATUS:
+       case ADV7511_REG_BSTATUS(0):
+       case ADV7511_REG_BSTATUS(1):
+       case ADV7511_REG_CHIP_ID_HIGH:
+       case ADV7511_REG_CHIP_ID_LOW:
+               return true;
+       }
+
+       return false;
+}
+
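+/*
+ * Cache all registers so that regcache_sync() can restore the chip state
+ * after it has been lost in power down.
+ */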
+static const struct regmap_config adv7511_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = 0xff,
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults_raw = adv7511_register_defaults,
+       .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+       .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+                                const uint16_t *coeff,
+                                unsigned int scaling_factor)
+{
+       unsigned int i;
+
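+       /* Freeze coefficient updates while the CSC is reprogrammed. */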
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+                          ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+       if (enable) {
+               for (i = 0; i < 12; ++i) {
+                       regmap_update_bits(adv7511->regmap,
+                                          ADV7511_REG_CSC_UPPER(i),
+                                          0x1f, coeff[i] >> 8);
+                       regmap_write(adv7511->regmap,
+                                    ADV7511_REG_CSC_LOWER(i),
+                                    coeff[i] & 0xff);
+               }
+       }
+
+       if (enable)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+                                  0xe0, 0x80 | (scaling_factor << 5));
+       else
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+                                  0x80, 0x00);
+
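+       /* Release the update freeze so the new settings take effect. */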
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+                          ADV7511_CSC_UPDATE_MODE, 0);
+}
+
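+/*
+ * The packet enable bits are split across two registers: the low byte of
+ * the mask maps to PACKET_ENABLE0 and the high byte to PACKET_ENABLE1.
+ */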
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+       if (packet & 0xff)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+                                  packet, 0xff);
+
+       if (packet & 0xff00) {
+               packet >>= 8;
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+                                  packet, 0xff);
+       }
+
+       return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+       if (packet & 0xff)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+                                  packet, 0x00);
+
+       if (packet & 0xff00) {
+               packet >>= 8;
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+                                  packet, 0x00);
+       }
+
+       return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+       0x0734, 0x04ad, 0x0000, 0x1c1b,
+       0x1ddc, 0x04ad, 0x1f24, 0x0135,
+       0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+                                  struct drm_connector *connector,
+                                  bool rgb)
+{
+       struct adv7511_video_config config;
+       bool output_format_422, output_format_ycbcr;
+       unsigned int mode;
+       uint8_t infoframe[17];
+
+       if (adv7511->edid)
+               config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+       else
+               config.hdmi_mode = false;
+
+       hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+       config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
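+       /*
+        * Output RGB directly for RGB input. For YCbCr input, output YUV422
+        * when the sink supports it in HDMI mode, otherwise convert to RGB
+        * through the CSC.
+        */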
+       if (rgb) {
+               config.csc_enable = false;
+               config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+       } else {
+               config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+               config.csc_coefficients = adv7511_csc_ycbcr_to_rgb;
+
+               if ((connector->display_info.color_formats &
+                    DRM_COLOR_FORMAT_YCRCB422) &&
+                   config.hdmi_mode) {
+                       config.csc_enable = false;
+                       config.avi_infoframe.colorspace =
+                               HDMI_COLORSPACE_YUV422;
+               } else {
+                       config.csc_enable = true;
+                       config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+               }
+       }
+
+       if (config.hdmi_mode) {
+               mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+               switch (config.avi_infoframe.colorspace) {
+               case HDMI_COLORSPACE_YUV444:
+                       output_format_422 = false;
+                       output_format_ycbcr = true;
+                       break;
+               case HDMI_COLORSPACE_YUV422:
+                       output_format_422 = true;
+                       output_format_ycbcr = true;
+                       break;
+               default:
+                       output_format_422 = false;
+                       output_format_ycbcr = false;
+                       break;
+               }
+       } else {
+               mode = ADV7511_HDMI_CFG_MODE_DVI;
+               output_format_422 = false;
+               output_format_ycbcr = false;
+       }
+
+       adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+       adv7511_set_colormap(adv7511, config.csc_enable,
+                            config.csc_coefficients,
+                            config.csc_scaling_factor);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+                          (output_format_422 << 7) | output_format_ycbcr);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+                          ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+       hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+                               sizeof(infoframe));
+
+       /* The AVI infoframe ID is not configurable. */
+       regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+                         infoframe + 1, sizeof(infoframe) - 1);
+
+       adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+                                   const struct adv7511_link_config *config)
+{
+       /*
+        * The input style values documented in the datasheet don't match the
+        * hardware register field values :-(
+        */
+       static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+       unsigned int clock_delay;
+       unsigned int color_depth;
+       unsigned int input_id;
+
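+       /* Map the clock delay (-1200..1600 ps) onto the 3-bit register field. */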
+       clock_delay = (config->clock_delay + 1200) / 400;
+       color_depth = config->input_color_depth == 8 ? 3
+                   : (config->input_color_depth == 10 ? 1 : 2);
+
+       /* TODO Support input ID 6 */
+       if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+               input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+                        ? 5 : 0;
+       else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+               input_id = config->embedded_sync ? 8 : 7;
+       else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+               input_id = config->embedded_sync ? 4 : 3;
+       else
+               input_id = config->embedded_sync ? 2 : 1;
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+                          input_id);
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+                          (color_depth << 4) |
+                          (input_styles[config->input_style] << 2));
+       regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+                    config->input_justification << 3);
+       regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+                    config->sync_pulse << 2);
+
+       regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+       adv7511->embedded_sync = config->embedded_sync;
+       adv7511->hsync_polarity = config->hsync_polarity;
+       adv7511->vsync_polarity = config->vsync_polarity;
+       adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
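+/* Check for a pending hot-plug detect interrupt and acknowledge it. */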
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+       unsigned int irq0;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+       if (ret < 0)
+               return false;
+
+       if (irq0 & ADV7511_INT0_HDP) {
+               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+                            ADV7511_INT0_HDP);
+               return true;
+       }
+
+       return false;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+       struct adv7511 *adv7511 = devid;
+
+       if (adv7511_hpd(adv7511))
+               drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+       wake_up_all(&adv7511->wq);
+
+       return IRQ_HANDLED;
+}
+
+static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+                                                unsigned int irq)
+{
+       unsigned int irq0, irq1;
+       unsigned int pending;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+       if (ret < 0)
+               return 0;
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+       if (ret < 0)
+               return 0;
+
+       pending = (irq1 << 8) | irq0;
+
+       return pending & irq;
+}
+
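+/*
+ * Wait for one of the interrupts in @irq to become pending, either by
+ * sleeping on the wait queue when an interrupt line is available or by
+ * polling every 25 ms otherwise. Returns the pending interrupts, or 0 on
+ * timeout.
+ */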
+static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+                                     int timeout)
+{
+       unsigned int pending;
+       int ret;
+
+       if (adv7511->i2c_main->irq) {
+               ret = wait_event_interruptible_timeout(adv7511->wq,
+                               adv7511_is_interrupt_pending(adv7511, irq),
+                               msecs_to_jiffies(timeout));
+               if (ret <= 0)
+                       return 0;
+               pending = adv7511_is_interrupt_pending(adv7511, irq);
+       } else {
+               if (timeout < 25)
+                       timeout = 25;
+               do {
+                       pending = adv7511_is_interrupt_pending(adv7511, irq);
+                       if (pending)
+                               break;
+                       msleep(25);
+                       timeout -= 25;
+               } while (timeout >= 25);
+       }
+
+       return pending;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+                                 size_t len)
+{
+       struct adv7511 *adv7511 = data;
+       struct i2c_msg xfer[2];
+       uint8_t offset;
+       unsigned int i;
+       int ret;
+
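+       /*
+        * The chip buffers one 256-byte EDID segment (two blocks) at a time,
+        * so only trigger a new read from the sink when a different segment
+        * is requested.
+        */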
+       if (len > 128)
+               return -EINVAL;
+
+       if (adv7511->current_edid_segment != block / 2) {
+               unsigned int status;
+
+               ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+                                 &status);
+               if (ret < 0)
+                       return ret;
+
+               if (status != 2) {
+                       regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+                                    block);
+                       ret = adv7511_wait_for_interrupt(adv7511,
+                                       ADV7511_INT0_EDID_READY |
+                                       ADV7511_INT1_DDC_ERROR, 200);
+
+                       if (!(ret & ADV7511_INT0_EDID_READY))
+                               return -EIO;
+               }
+
+               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+                            ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+
+               /* Read the EDID in four 64-byte chunks; more I2C controllers
+                * are likely to support 64-byte transfers than 256-byte
+                * transfers.
+                */
+
+               xfer[0].addr = adv7511->i2c_edid->addr;
+               xfer[0].flags = 0;
+               xfer[0].len = 1;
+               xfer[0].buf = &offset;
+               xfer[1].addr = adv7511->i2c_edid->addr;
+               xfer[1].flags = I2C_M_RD;
+               xfer[1].len = 64;
+               xfer[1].buf = adv7511->edid_buf;
+
+               offset = 0;
+
+               for (i = 0; i < 4; ++i) {
+                       ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+                                          ARRAY_SIZE(xfer));
+                       if (ret < 0)
+                               return ret;
+                       else if (ret != 2)
+                               return -EIO;
+
+                       xfer[1].buf += 64;
+                       offset += 64;
+               }
+
+               adv7511->current_edid_segment = block / 2;
+       }
+
+       if (block % 2 == 0)
+               memcpy(buf, adv7511->edid_buf, len);
+       else
+               memcpy(buf, adv7511->edid_buf + 128, len);
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_get_modes(struct drm_encoder *encoder,
+                            struct drm_connector *connector)
+{
+       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+       struct edid *edid;
+       unsigned int count;
+
+       /* Reading the EDID only works if the device is powered */
+       if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+                            ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN, 0);
+               adv7511->current_edid_segment = -1;
+       }
+
+       edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+       if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN,
+                                  ADV7511_POWER_POWER_DOWN);
+
+       kfree(adv7511->edid);
+       adv7511->edid = edid;
+       if (!edid)
+               return 0;
+
+       drm_mode_connector_update_edid_property(connector, edid);
+       count = drm_add_edid_modes(connector, edid);
+
+       adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+       return count;
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               adv7511->current_edid_segment = -1;
+
+               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+                            ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN, 0);
+               /*
+                * Per spec it is allowed to pulse the HPD signal to indicate
+                * that the EDID information has changed. Some monitors do this
+                * when they wake up from standby or are enabled. When the HPD
+                * goes low the adv7511 is reset and the outputs are disabled,
+                * which might cause the monitor to go to standby again. To
+                * avoid this we ignore the HPD pin for the first few seconds
+                * after enabling the output.
+                */
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+                                  ADV7511_REG_POWER2_HDP_SRC_MASK,
+                                  ADV7511_REG_POWER2_HDP_SRC_NONE);
+               /* Most of the registers are reset during power down or
+                * when HPD is low
+                */
+               regcache_sync(adv7511->regmap);
+               break;
+       default:
+               /* TODO: setup additional power down modes */
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN,
+                                  ADV7511_POWER_POWER_DOWN);
+               regcache_mark_dirty(adv7511->regmap);
+               break;
+       }
+
+       adv7511->dpms_mode = mode;
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+                      struct drm_connector *connector)
+{
+       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+       enum drm_connector_status status;
+       unsigned int val;
+       bool hpd;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+       if (ret < 0)
+               return connector_status_disconnected;
+
+       if (val & ADV7511_STATUS_HPD)
+               status = connector_status_connected;
+       else
+               status = connector_status_disconnected;
+
+       hpd = adv7511_hpd(adv7511);
+
+       /* The chip resets itself when the cable is disconnected, so if there
+        * is a pending HPD interrupt and the cable is connected, there was at
+        * least one transition from disconnected to connected and the chip
+        * has to be reinitialized.
+        */
+       if (status == connector_status_connected && hpd &&
+           adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+               regcache_mark_dirty(adv7511->regmap);
+               adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+               adv7511_get_modes(encoder, connector);
+               if (adv7511->status == connector_status_connected)
+                       status = connector_status_disconnected;
+       } else {
+               /* Re-enable HPD sensing. */
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+                                  ADV7511_REG_POWER2_HDP_SRC_MASK,
+                                  ADV7511_REG_POWER2_HDP_SRC_BOTH);
+       }
+
+       adv7511->status = status;
+       return status;
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+                                     struct drm_display_mode *mode)
+{
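+       /* Pixel clocks above 165 MHz exceed the TMDS limit, and interlaced
+        * modes are not supported by this driver.
+        */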
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       return MODE_OK;
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adj_mode)
+{
+       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+       unsigned int low_refresh_rate;
+       unsigned int hsync_polarity = 0;
+       unsigned int vsync_polarity = 0;
+
+       if (adv7511->embedded_sync) {
+               unsigned int hsync_offset, hsync_len;
+               unsigned int vsync_offset, vsync_len;
+
+               hsync_offset = adj_mode->crtc_hsync_start -
+                              adj_mode->crtc_hdisplay;
+               vsync_offset = adj_mode->crtc_vsync_start -
+                              adj_mode->crtc_vdisplay;
+               hsync_len = adj_mode->crtc_hsync_end -
+                           adj_mode->crtc_hsync_start;
+               vsync_len = adj_mode->crtc_vsync_end -
+                           adj_mode->crtc_vsync_start;
+
+               /* The hardware vsync generator has an off-by-one bug. */
+               vsync_offset += 1;
+
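+               /*
+                * Program the sync decoder with the hsync/vsync offsets and
+                * pulse widths, packed across the sync decoder registers.
+                */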
+               regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+                            ((hsync_offset >> 10) & 0x7) << 5);
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+                            (hsync_offset >> 2) & 0xff);
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+                            ((hsync_offset & 0x3) << 6) |
+                            ((hsync_len >> 4) & 0x3f));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+                            ((hsync_len & 0xf) << 4) |
+                            ((vsync_offset >> 6) & 0xf));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+                            ((vsync_offset & 0x3f) << 2) |
+                            ((vsync_len >> 8) & 0x3));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+                            vsync_len & 0xff);
+
+               hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+               vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+       } else {
+               enum adv7511_sync_polarity mode_hsync_polarity;
+               enum adv7511_sync_polarity mode_vsync_polarity;
+
+               /*
+                * If the input signal is always low or always high we want to
+                * invert it or pass it through, depending on the polarity of
+                * the current mode.
+                */
+               if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+               else
+                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+               if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+               else
+                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+               if (adv7511->hsync_polarity != mode_hsync_polarity &&
+                   adv7511->hsync_polarity !=
+                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
+                       hsync_polarity = 1;
+
+               if (adv7511->vsync_polarity != mode_vsync_polarity &&
+                   adv7511->vsync_polarity !=
+                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
+                       vsync_polarity = 1;
+       }
+
+       /* mode->vrefresh is given in Hz. */
+       if (mode->vrefresh <= 24)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+       else if (mode->vrefresh <= 25)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+       else if (mode->vrefresh <= 30)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+       else
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+       regmap_update_bits(adv7511->regmap, 0xfb,
+               0x6, low_refresh_rate << 1);
+       regmap_update_bits(adv7511->regmap, 0x17,
+               0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+       /*
+        * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+        * supposed to give better results.
+        */
+
+       adv7511->f_tmds = mode->clock;
+}
+
+static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+       .dpms = adv7511_encoder_dpms,
+       .mode_valid = adv7511_encoder_mode_valid,
+       .mode_set = adv7511_encoder_mode_set,
+       .detect = adv7511_encoder_detect,
+       .get_modes = adv7511_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+                           struct adv7511_link_config *config)
+{
+       const char *str;
+       int ret;
+
+       memset(config, 0, sizeof(*config));
+
+       of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+       if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+           config->input_color_depth != 12)
+               return -EINVAL;
+
+       ret = of_property_read_string(np, "adi,input-colorspace", &str);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(str, "rgb"))
+               config->input_colorspace = HDMI_COLORSPACE_RGB;
+       else if (!strcmp(str, "yuv422"))
+               config->input_colorspace = HDMI_COLORSPACE_YUV422;
+       else if (!strcmp(str, "yuv444"))
+               config->input_colorspace = HDMI_COLORSPACE_YUV444;
+       else
+               return -EINVAL;
+
+       ret = of_property_read_string(np, "adi,input-clock", &str);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(str, "1x"))
+               config->input_clock = ADV7511_INPUT_CLOCK_1X;
+       else if (!strcmp(str, "2x"))
+               config->input_clock = ADV7511_INPUT_CLOCK_2X;
+       else if (!strcmp(str, "ddr"))
+               config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+       else
+               return -EINVAL;
+
+       if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+           config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+               ret = of_property_read_u32(np, "adi,input-style",
+                                          &config->input_style);
+               if (ret)
+                       return ret;
+
+               if (config->input_style < 1 || config->input_style > 3)
+                       return -EINVAL;
+
+               ret = of_property_read_string(np, "adi,input-justification",
+                                             &str);
+               if (ret < 0)
+                       return ret;
+
+               if (!strcmp(str, "left"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_LEFT;
+               else if (!strcmp(str, "evenly"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_EVENLY;
+               else if (!strcmp(str, "right"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_RIGHT;
+               else
+                       return -EINVAL;
+
+       } else {
+               config->input_style = 1;
+               config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+       }
+
+       of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+       if (config->clock_delay < -1200 || config->clock_delay > 1600)
+               return -EINVAL;
+
+       config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+       /* Hardcode the sync pulse configurations for now. */
+       config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+       config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+       config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+       return 0;
+}
+
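+/* 8-bit I2C addresses assigned to the chip's EDID, packet and CEC maps. */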
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+       struct adv7511_link_config link_config;
+       struct adv7511 *adv7511;
+       struct device *dev = &i2c->dev;
+       unsigned int val;
+       int ret;
+
+       if (!dev->of_node)
+               return -EINVAL;
+
+       adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+       if (!adv7511)
+               return -ENOMEM;
+
+       adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+       adv7511->status = connector_status_disconnected;
+
+       ret = adv7511_parse_dt(dev->of_node, &link_config);
+       if (ret)
+               return ret;
+
+       /*
+        * The power down GPIO is optional. If present, toggle it from active to
+        * inactive to wake up the encoder.
+        */
+       adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+       if (IS_ERR(adv7511->gpio_pd))
+               return PTR_ERR(adv7511->gpio_pd);
+
+       if (adv7511->gpio_pd) {
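+               /* Hold the chip in power down briefly, then release the GPIO
+                * to wake it up.
+                */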
+               mdelay(5);
+               gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+       }
+
+       adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+       if (IS_ERR(adv7511->regmap))
+               return PTR_ERR(adv7511->regmap);
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+       if (ret)
+               return ret;
+       dev_dbg(dev, "Rev. %d\n", val);
+
+       ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
+                                   ARRAY_SIZE(adv7511_fixed_registers));
+       if (ret)
+               return ret;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+       regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+                    packet_i2c_addr);
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+       adv7511_packet_disable(adv7511, 0xffff);
+
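+       /*
+        * The EDID is read through a separate I2C address; register a dummy
+        * client for it. The constant is an 8-bit address, hence the shift.
+        */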
+       adv7511->i2c_main = i2c;
+       adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+       if (!adv7511->i2c_edid)
+               return -ENOMEM;
+
+       if (i2c->irq) {
+               init_waitqueue_head(&adv7511->wq);
+
+               ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+                                               adv7511_irq_handler,
+                                               IRQF_ONESHOT, dev_name(dev),
+                                               adv7511);
+               if (ret)
+                       goto err_i2c_unregister_device;
+       }
+
+       /* CEC is unused for now */
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                          ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
+
+       adv7511->current_edid_segment = -1;
+
+       i2c_set_clientdata(i2c, adv7511);
+
+       adv7511_set_link_config(adv7511, &link_config);
+
+       return 0;
+
+err_i2c_unregister_device:
+       i2c_unregister_device(adv7511->i2c_edid);
+
+       return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+       struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+       i2c_unregister_device(adv7511->i2c_edid);
+
+       kfree(adv7511->edid);
+
+       return 0;
+}
+
+static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
+                               struct drm_encoder_slave *encoder)
+{
+       struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+       encoder->slave_priv = adv7511;
+       encoder->slave_funcs = &adv7511_encoder_funcs;
+
+       adv7511->encoder = &encoder->base;
+
+       return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+       { "adv7511", 0 },
+       { "adv7511w", 0 },
+       { "adv7513", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+       { .compatible = "adi,adv7511", },
+       { .compatible = "adi,adv7511w", },
+       { .compatible = "adi,adv7513", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct drm_i2c_encoder_driver adv7511_driver = {
+       .i2c_driver = {
+               .driver = {
+                       .name = "adv7511",
+                       .of_match_table = adv7511_of_ids,
+               },
+               .id_table = adv7511_i2c_ids,
+               .probe = adv7511_probe,
+               .remove = adv7511_remove,
+       },
+
+       .encoder_init = adv7511_encoder_init,
+};
+
+static int __init adv7511_init(void)
+{
+       return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+       drm_i2c_encoder_unregister(&adv7511_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
new file mode 100644 (file)
index 0000000..6599ed5
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+
+#define ADV7511_REG_CHIP_REVISION              0x00
+#define ADV7511_REG_N0                         0x01
+#define ADV7511_REG_N1                         0x02
+#define ADV7511_REG_N2                         0x03
+#define ADV7511_REG_SPDIF_FREQ                 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1             0x05
+#define ADV7511_REG_CTS_AUTOMATIC2             0x06
+#define ADV7511_REG_CTS_MANUAL0                        0x07
+#define ADV7511_REG_CTS_MANUAL1                        0x08
+#define ADV7511_REG_CTS_MANUAL2                        0x09
+#define ADV7511_REG_AUDIO_SOURCE               0x0a
+#define ADV7511_REG_AUDIO_CONFIG               0x0b
+#define ADV7511_REG_I2S_CONFIG                 0x0c
+#define ADV7511_REG_I2S_WIDTH                  0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0             0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1             0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2             0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3             0x11
+#define ADV7511_REG_AUDIO_CFG1                 0x12
+#define ADV7511_REG_AUDIO_CFG2                 0x13
+#define ADV7511_REG_AUDIO_CFG3                 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG            0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1           0x16
+#define ADV7511_REG_CSC_UPPER(x)               (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x)               (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x)            (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR(x)            (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION           0x3b
+#define ADV7511_REG_VIC_MANUAL                 0x3c
+#define ADV7511_REG_VIC_SEND                   0x3d
+#define ADV7511_REG_VIC_DETECTED               0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED           0x3f
+#define ADV7511_REG_PACKET_ENABLE0             0x40
+#define ADV7511_REG_POWER                      0x41
+#define ADV7511_REG_STATUS                     0x42
+#define ADV7511_REG_EDID_I2C_ADDR              0x43
+#define ADV7511_REG_PACKET_ENABLE1             0x44
+#define ADV7511_REG_PACKET_I2C_ADDR            0x45
+#define ADV7511_REG_DSD_ENABLE                 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2           0x48
+#define ADV7511_REG_INFOFRAME_UPDATE           0x4a
+#define ADV7511_REG_GC(x)                      (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION      0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH       0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM     0x54
+#define ADV7511_REG_AVI_INFOFRAME(x)           (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION    0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH     0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM   0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x)         (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x)              (0x94 + (x))
+#define ADV7511_REG_INT(x)                     (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV              0x9d
+#define ADV7511_REG_PLL_STATUS                 0x9e
+#define ADV7511_REG_HDMI_POWER                 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG              0xaf
+#define ADV7511_REG_AN(x)                      (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS                        0xb8
+#define ADV7511_REG_BCAPS                      0xbe
+#define ADV7511_REG_BKSV(x)                    (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT               0xc4
+#define ADV7511_REG_DDC_STATUS                 0xc8
+#define ADV7511_REG_EDID_READ_CTRL             0xc9
+#define ADV7511_REG_BSTATUS(x)                 (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ             0xd0
+#define ADV7511_REG_POWER2                     0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB                0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x)         (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV             0xde
+#define ADV7511_REG_ARC_CTRL                   0xdf
+#define ADV7511_REG_CEC_I2C_ADDR               0xe1
+#define ADV7511_REG_CEC_CTRL                   0xe2
+#define ADV7511_REG_CHIP_ID_HIGH               0xf5
+#define ADV7511_REG_CHIP_ID_LOW                        0xf6
+
+#define ADV7511_CSC_ENABLE                     BIT(7)
+#define ADV7511_CSC_UPDATE_MODE                        BIT(5)
+
+#define ADV7511_INT0_HDP                       BIT(7)
+#define ADV7511_INT0_VSYNC                     BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL           BIT(4)
+#define ADV7511_INT0_EDID_READY                        BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED                BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR                 BIT(7)
+#define ADV7511_INT1_BKSV                      BIT(6)
+#define ADV7511_INT1_CEC_TX_READY              BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST         BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT      BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3             BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2             BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1             BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN            BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN            BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN               BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK             0x2
+#define ADV7511_HDMI_CFG_MODE_DVI              0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI             0x2
+
+#define ADV7511_AUDIO_SELECT_I2C               0x0
+#define ADV7511_AUDIO_SELECT_SPDIF             0x1
+#define ADV7511_AUDIO_SELECT_DSD               0x2
+#define ADV7511_AUDIO_SELECT_HBR               0x3
+#define ADV7511_AUDIO_SELECT_DST               0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16              0x2
+#define ADV7511_I2S_SAMPLE_LEN_20              0x3
+#define ADV7511_I2S_SAMPLE_LEN_18              0x4
+#define ADV7511_I2S_SAMPLE_LEN_22              0x5
+#define ADV7511_I2S_SAMPLE_LEN_19              0x8
+#define ADV7511_I2S_SAMPLE_LEN_23              0x9
+#define ADV7511_I2S_SAMPLE_LEN_24              0xb
+#define ADV7511_I2S_SAMPLE_LEN_17              0xc
+#define ADV7511_I2S_SAMPLE_LEN_21              0xd
+
+#define ADV7511_SAMPLE_FREQ_44100              0x0
+#define ADV7511_SAMPLE_FREQ_48000              0x2
+#define ADV7511_SAMPLE_FREQ_32000              0x3
+#define ADV7511_SAMPLE_FREQ_88200              0x8
+#define ADV7511_SAMPLE_FREQ_96000              0xa
+#define ADV7511_SAMPLE_FREQ_176400             0xc
+#define ADV7511_SAMPLE_FREQ_192000             0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY     BIT(7)
+#define ADV7511_STATUS_HPD                     BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE           BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE          BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS            BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE     BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME    BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME  BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC               BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD              BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG             BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP              BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC             BIT(3)
+#define ADV7511_PACKET_ENABLE_GM               BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2           BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1           BIT(0)
+
+#define ADV7511_REG_POWER2_HDP_SRC_MASK                0xc0
+#define ADV7511_REG_POWER2_HDP_SRC_BOTH                0x00
+#define ADV7511_REG_POWER2_HDP_SRC_HDP         0x40
+#define ADV7511_REG_POWER2_HDP_SRC_CEC         0x80
+#define ADV7511_REG_POWER2_HDP_SRC_NONE                0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE         BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK      BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE          0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ          0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ          0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ          0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK            0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK      0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S               0
+#define ADV7511_AUDIO_SOURCE_SPDIF             1
+
+#define ADV7511_I2S_FORMAT_I2S                 0
+#define ADV7511_I2S_FORMAT_RIGHT_J             1
+#define ADV7511_I2S_FORMAT_LEFT_J              2
+
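+/* Each packet occupies a 32-byte window in the packet memory. */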
+#define ADV7511_PACKET(p, x)       ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x)      ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x)     ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x)      ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x)            ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x)            ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x)       ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x)            ADV7511_PACKET(6, x)
+
+enum adv7511_input_clock {
+       ADV7511_INPUT_CLOCK_1X,
+       ADV7511_INPUT_CLOCK_2X,
+       ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+       ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+       ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+       ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+       ADV7511_INPUT_SYNC_PULSE_DE = 0,
+       ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+       ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+       ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH:  Sync polarity matches that of
+ *                                    the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW:     Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH:            Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+       ADV7511_SYNC_POLARITY_PASSTHROUGH,
+       ADV7511_SYNC_POLARITY_LOW,
+       ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth:         Number of bits per color component (8, 10 or 12)
+ * @input_colorspace:          The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock:               The input video clock style (1x, 2x, DDR)
+ * @input_style:               The input component arrangement variant
+ * @input_justification:       Video input format bit justification
+ * @clock_delay:               Clock delay for the input clock (in ps)
+ * @embedded_sync:             Video input uses BT.656-style embedded sync
+ * @sync_pulse:                        Select the sync pulse
+ * @vsync_polarity:            vsync input signal configuration
+ * @hsync_polarity:            hsync input signal configuration
+ */
+struct adv7511_link_config {
+       unsigned int input_color_depth;
+       enum hdmi_colorspace input_colorspace;
+       enum adv7511_input_clock input_clock;
+       unsigned int input_style;
+       enum adv7511_input_justification input_justification;
+
+       int clock_delay;
+
+       bool embedded_sync;
+       enum adv7511_input_sync_pulse sync_pulse;
+       enum adv7511_sync_polarity vsync_polarity;
+       enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+       ADV7511_CSC_SCALING_1 = 0,
+       ADV7511_CSC_SCALING_2 = 1,
+       ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 hardware configuration
+ * @csc_enable:                        Whether to enable color space conversion
+ * @csc_scaling_factor:                Color space conversion scaling factor
+ * @csc_coefficients:          Color space conversion coefficients
+ * @hdmi_mode:                 Whether to use HDMI or DVI output mode
+ * @avi_infoframe:             HDMI infoframe
+ */
+struct adv7511_video_config {
+       bool csc_enable;
+       enum adv7511_csc_scaling csc_scaling_factor;
+       const uint16_t *csc_coefficients;
+
+       bool hdmi_mode;
+       struct hdmi_avi_infoframe avi_infoframe;
+};
+
+#endif /* __DRM_I2C_ADV7511_H__ */
index c1dd485aeb6c255230c83e7fea876fb79fed3308..e4083e41a600e264599cf607a5f419d60fd21383 100644 (file)
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
          i915_params.o \
           i915_suspend.o \
          i915_sysfs.o \
-         intel_pm.o
+         intel_pm.o \
+         intel_runtime_pm.o
+
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 
@@ -38,13 +40,18 @@ i915-y += i915_cmd_parser.o \
 # autogenerated null render state
 i915-y += intel_renderstate_gen6.o \
          intel_renderstate_gen7.o \
-         intel_renderstate_gen8.o
+         intel_renderstate_gen8.o \
+         intel_renderstate_gen9.o
 
 # modesetting core code
-i915-y += intel_bios.o \
+i915-y += intel_audio.o \
+         intel_bios.o \
          intel_display.o \
+         intel_fifo_underrun.o \
+         intel_frontbuffer.o \
          intel_modes.o \
          intel_overlay.o \
+         intel_psr.o \
          intel_sideband.o \
          intel_sprite.o
 i915-$(CONFIG_ACPI)            += intel_acpi.o intel_opregion.o
index 593b657d3e59e21d3e73dac35086a64a850f8b69..22c992a78ac6a86dd976bbe106c140fa61814b67 100644 (file)
@@ -73,7 +73,7 @@
  * those commands required by the parser. This generally works because command
  * opcode ranges have standard command length encodings. So for commands that
  * the parser does not need to check, it can easily skip them. This is
- * implementated via a per-ring length decoding vfunc.
+ * implemented via a per-ring length decoding vfunc.
  *
  * Unfortunately, there are a number of commands that do not follow the standard
  * length encoding for their opcode range, primarily amongst the MI_* commands.
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
+       /*
+        * MI_BATCH_BUFFER_START requires some special handling. It's not
+        * really a 'skip' action but it doesn't seem like it's worth adding
+        * a new action. See i915_parse_cmds().
+        */
        CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
 };
 
@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
        REG64(PS_INVOCATION_COUNT),
        REG64(PS_DEPTH_COUNT),
        OACONTROL, /* Only allowed for LRI and SRM. See below. */
+       REG64(MI_PREDICATE_SRC0),
+       REG64(MI_PREDICATE_SRC1),
        GEN7_3DPRIM_END_OFFSET,
        GEN7_3DPRIM_START_VERTEX,
        GEN7_3DPRIM_VERTEX_COUNT,
@@ -838,7 +845,7 @@ finish:
  * @ring: the ring in question
  *
  * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module paramter.
+ * only when enabled via module parameter.
  *
  * Return: true if the ring requires software command parsing
  */
@@ -847,12 +854,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
        if (!ring->needs_cmd_parser)
                return false;
 
-       /*
-        * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
-        * disabled. That will cause all of the parser's PPGTT checks to
-        * fail. For now, disable parsing when PPGTT is off.
-        */
-       if (USES_PPGTT(ring->dev))
+       if (!USES_PPGTT(ring->dev))
                return false;
 
        return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
                 */
                if (reg_addr == OACONTROL) {
-                       if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+                       if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+                               DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                return false;
+                       }
 
                        if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                *oacontrol_set = (cmd[2] != 0);
@@ -958,7 +962,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
  *
- * Return: non-zero if the parser finds violations or otherwise fails
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
  */
 int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
@@ -1005,6 +1010,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
                        break;
                }
 
+               /*
+                * If the batch buffer contains a chained batch, return an
+                * error that tells the caller to abort and dispatch the
+                * workload as a non-secure batch.
+                */
+               if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+                       ret = -EACCES;
+                       break;
+               }
+
                if (desc->flags & CMD_DESC_FIXED)
                        length = desc->length.fixed;
                else
@@ -1059,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
         *
         * 1. Initial version. Checks batches and reports violations, but leaves
         *    hardware parsing enabled (so does not allow new use cases).
+        * 2. Allow access to the MI_PREDICATE_SRC0 and
+        *    MI_PREDICATE_SRC1 registers.
         */
-       return 1;
+       return 2;
 }
index 063b44817e083649088f59f677fcc300679bd5ec..779a275eb1fd34fe2623e01be06846566b8dce31 100644 (file)
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-       return obj->has_global_gtt_mapping ? "g" : " ";
+       return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
 static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
        struct intel_crtc *crtc;
        int ret;
 
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;
 
-               spin_lock_irqsave(&dev->event_lock, flags);
+               spin_lock_irq(&dev->event_lock);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
                        }
                }
-               spin_unlock_irqrestore(&dev->event_lock, flags);
+               spin_unlock_irq(&dev->event_lock);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                }
 
                for_each_pipe(dev_priv, pipe) {
-                       if (!intel_display_power_enabled(dev_priv,
+                       if (!intel_display_power_is_enabled(dev_priv,
                                                POWER_DOMAIN_PIPE(pipe))) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 rpmodectl1, rcctl1;
+       u32 rpmodectl1, rcctl1, pw_status;
        unsigned fw_rendercount = 0, fw_mediacount = 0;
 
        intel_runtime_pm_get(dev_priv);
 
+       pw_status = I915_READ(VLV_GTLC_PW_STATUS);
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
                   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
                                        GEN6_RC_CTL_EI_MODE(1))));
        seq_printf(m, "Render Power Well: %s\n",
-                       (I915_READ(VLV_GTLC_PW_STATUS) &
-                               VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+                  (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
        seq_printf(m, "Media Power Well: %s\n",
-                       (I915_READ(VLV_GTLC_PW_STATUS) &
-                               VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+                  (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
 
        seq_printf(m, "Render RC6 residency since boot: %u\n",
                   I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
        return 0;
 }
 
+static void i915_dump_lrc_obj(struct seq_file *m,
+                             struct intel_engine_cs *ring,
+                             struct drm_i915_gem_object *ctx_obj)
+{
+       struct page *page;
+       uint32_t *reg_state;
+       int j;
+       unsigned long ggtt_offset = 0;
+
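+       /* Dump the first 96 dwords of the context's register state page. */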
+       if (ctx_obj == NULL) {
+               seq_printf(m, "Context on %s with no gem object\n",
+                          ring->name);
+               return;
+       }
+
+       seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+                  intel_execlists_ctx_id(ctx_obj));
+
+       if (!i915_gem_obj_ggtt_bound(ctx_obj))
+               seq_puts(m, "\tNot bound in GGTT\n");
+       else
+               ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+       if (i915_gem_object_get_pages(ctx_obj)) {
+               seq_puts(m, "\tFailed to get pages for context object\n");
+               return;
+       }
+
+       page = i915_gem_object_get_page(ctx_obj, 1);
+       if (!WARN_ON(page == NULL)) {
+               reg_state = kmap_atomic(page);
+
+               for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+                       seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                  ggtt_offset + 4096 + (j * 4),
+                                  reg_state[j], reg_state[j + 1],
+                                  reg_state[j + 2], reg_state[j + 3]);
+               }
+               kunmap_atomic(reg_state);
+       }
+
+       seq_putc(m, '\n');
+}
+
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
        list_for_each_entry(ctx, &dev_priv->context_list, link) {
                for_each_ring(ring, dev_priv, i) {
-                       struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-                       if (ring->default_context == ctx)
-                               continue;
-
-                       if (ctx_obj) {
-                               struct page *page = i915_gem_object_get_page(ctx_obj, 1);
-                               uint32_t *reg_state = kmap_atomic(page);
-                               int j;
-
-                               seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-                                               intel_execlists_ctx_id(ctx_obj));
-
-                               for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-                                       seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                                       i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
-                                       reg_state[j], reg_state[j + 1],
-                                       reg_state[j + 2], reg_state[j + 3]);
-                               }
-                               kunmap_atomic(reg_state);
-
-                               seq_putc(m, '\n');
-                       }
+                       if (ring->default_context != ctx)
+                               i915_dump_lrc_obj(m, ring,
+                                                 ctx->engine[i].state);
                }
        }
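
These two hunks extract the per-context dump into i915_dump_lrc_obj(), adding the NULL-object, GGTT-binding, and get-pages checks that the inline version skipped. The formatting loop itself, which walks the register state and prints four dwords per line with a running byte offset, can be sketched standalone (buffer contents and base address are made up):

#include <stdio.h>
#include <stdint.h>

/* Dump 'count' 32-bit words, four per line, prefixed with base + byte
 * offset, mirroring the reg_state loop in i915_dump_lrc_obj(). */
static void dump_reg_state(unsigned long base, const uint32_t *state,
                           size_t count)
{
        size_t j;

        for (j = 0; j + 3 < count; j += 4)
                printf("\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
                       base + j * 4,
                       state[j], state[j + 1], state[j + 2], state[j + 3]);
}

int main(void)
{
        uint32_t state[0x600 / sizeof(uint32_t)] = { 0x11000001, 0x2244 };

        /* 0x1000 stands in for ggtt_offset + 4096 in the hunk above. */
        dump_reg_state(0x1000, state, sizeof(state) / sizeof(state[0]));
        return 0;
}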
 
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
        if (ret)
                return ret;
 
+       intel_runtime_pm_get(dev_priv);
+
        for_each_ring(ring, dev_priv, ring_id) {
                struct intel_ctx_submit_request *head_req = NULL;
                int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
                seq_putc(m, '\n');
        }
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
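
The i915_execlists hunks bracket the register walk with intel_runtime_pm_get()/intel_runtime_pm_put(), so the device cannot runtime-suspend while the debugfs file is touching hardware. The shape is an ordinary acquire/release pair around a critical section; a toy refcount version:

#include <stdio.h>

static int pm_refcount; /* stands in for the runtime-PM usage count */

static void runtime_pm_get(void) { pm_refcount++; }
static void runtime_pm_put(void) { pm_refcount--; }

static void touch_hardware(void)
{
        printf("reading registers (refcount=%d, device held awake)\n",
               pm_refcount);
}

int main(void)
{
        runtime_pm_get();   /* keep the device powered ... */
        touch_hardware();
        runtime_pm_put();   /* ... and drop the reference on every exit path */
        return 0;
}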
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        if (IS_GEN3(dev) || IS_GEN4(dev)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
+               seq_printf(m, "DDC2 = 0x%08x\n",
+                          I915_READ(DCC2));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
-               if (IS_GEN8(dev))
+               if (INTEL_INFO(dev)->gen >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }
+
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               seq_puts(m, "L-shaped memory detected\n");
+
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
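
The swizzle hunks add a DCC2 dump, widen the IS_GEN8() test to gen >= 8, and report the QUIRK_PIN_SWIZZLED_PAGES quirk. The quirk test is a plain bit flag in dev_priv->quirks; a self-contained version of the idiom (the flag's bit position is invented here):

#include <stdio.h>
#include <stdint.h>

#define QUIRK_PIN_SWIZZLED_PAGES (1u << 5) /* hypothetical bit */

struct fake_i915_private {
        uint32_t quirks;
};

int main(void)
{
        struct fake_i915_private dev_priv = {
                .quirks = QUIRK_PIN_SWIZZLED_PAGES,
        };

        if (dev_priv.quirks & QUIRK_PIN_SWIZZLED_PAGES)
                puts("L-shaped memory detected");
        return 0;
}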
 
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
-               seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
-                          pll->active, yesno(pll->on));
+               seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+                          pll->config.crtc_mask, pll->active, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
-               seq_printf(m, " dpll:    0x%08x\n", pll->hw_state.dpll);
-               seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
-               seq_printf(m, " fp0:     0x%08x\n", pll->hw_state.fp0);
-               seq_printf(m, " fp1:     0x%08x\n", pll->hw_state.fp1);
-               seq_printf(m, " wrpll:   0x%08x\n", pll->hw_state.wrpll);
+               seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
+               seq_printf(m, " dpll_md: 0x%08x\n",
+                          pll->config.hw_state.dpll_md);
+               seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
+               seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
+               seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
        }
        drm_modeset_unlock_all(dev);
 
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
        intel_runtime_pm_get(dev_priv);
 
-       seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
-       for (i = 0; i < dev_priv->num_wa_regs; ++i) {
-               u32 addr, mask;
-
-               addr = dev_priv->intel_wa_regs[i].addr;
-               mask = dev_priv->intel_wa_regs[i].mask;
-               dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
-               if (dev_priv->intel_wa_regs[i].addr)
-                       seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-                                  dev_priv->intel_wa_regs[i].addr,
-                                  dev_priv->intel_wa_regs[i].value,
-                                  dev_priv->intel_wa_regs[i].mask);
+       seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+       for (i = 0; i < dev_priv->workarounds.count; ++i) {
+               u32 addr, mask, value, read;
+               bool ok;
+
+               addr = dev_priv->workarounds.reg[i].addr;
+               mask = dev_priv->workarounds.reg[i].mask;
+               value = dev_priv->workarounds.reg[i].value;
+               read = I915_READ(addr);
+               ok = (value & mask) == (read & mask);
+               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+                          addr, value, mask, read, ok ? "OK" : "FAIL");
        }
 
        intel_runtime_pm_put(dev_priv);
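
The reworked i915_wa_registers no longer clobbers the stored value with whatever it reads back; it keeps the programmed value, reads the register, and compares both sides under the mask, printing OK or FAIL. The comparison is worth spelling out, since only the bits covered by the mask are significant (register values below are invented):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* One workaround register entry, as in dev_priv->workarounds.reg[]. */
struct wa_reg {
        uint32_t addr, value, mask;
};

int main(void)
{
        struct wa_reg wa = {
                .addr = 0x7004, .value = 0x0000ffff, .mask = 0x0000f00f,
        };
        uint32_t read = 0x1234f00f; /* pretend this came from I915_READ(addr) */

        /* Only masked bits must match the programmed value. */
        bool ok = (wa.value & wa.mask) == (read & wa.mask);

        printf("0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
               wa.addr, wa.value, wa.mask, read, ok ? "OK" : "FAIL");
        return 0;
}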
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
        return 0;
 }
 
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct skl_ddb_allocation *ddb;
+       struct skl_ddb_entry *entry;
+       enum pipe pipe;
+       int plane;
+
+       drm_modeset_lock_all(dev);
+
+       ddb = &dev_priv->wm.skl_hw.ddb;
+
+       seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+       for_each_pipe(dev_priv, pipe) {
+               seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+               for_each_plane(pipe, plane) {
+                       entry = &ddb->plane[pipe][plane];
+                       seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
+                                  entry->start, entry->end,
+                                  skl_ddb_entry_size(entry));
+               }
+
+               entry = &ddb->cursor[pipe];
+               seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
+                          entry->end, skl_ddb_entry_size(entry));
+       }
+
+       drm_modeset_unlock_all(dev);
+
+       return 0;
+}
+
 struct pipe_crc_info {
        const char *name;
        struct drm_device *dev;
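
The new i915_ddb_info file prints the Skylake display-buffer allocation as a fixed-width table, with the header and the rows sharing column widths through printf field specifiers. The layout trick in isolation (entry values invented; the driver's skl_ddb_entry_size also handles empty entries):

#include <stdio.h>

struct ddb_entry { unsigned start, end; };

static unsigned ddb_entry_size(const struct ddb_entry *e)
{
        return e->end - e->start; /* simplified */
}

int main(void)
{
        struct ddb_entry plane = { 0, 160 }, cursor = { 160, 192 };

        /* %-15s left-pads the row label; %8s/%8u give aligned columns. */
        printf("%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
        printf("  Plane%-8d%8u%8u%8u\n", 1, plane.start, plane.end,
               ddb_entry_size(&plane));
        printf("  %-13s%8u%8u%8u\n", "Cursor", cursor.start, cursor.end,
               ddb_entry_size(&cursor));
        return 0;
}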
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
                                break;
                        }
                        break;
+               default:
+                       break;
                }
        }
        drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+       struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+                                                                       pipe));
        u32 val = 0; /* shut up gcc */
        int ret;
 
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
        if (pipe_crc->source && source)
                return -EINVAL;
 
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+               DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+               return -EIO;
+       }
+
        if (IS_GEN2(dev))
                ret = i8xx_pipe_crc_ctl_reg(&source, &val);
        else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                if (!pipe_crc->entries)
                        return -ENOMEM;
 
+               /*
+                * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+                * enabled and disabled dynamically based on package C states,
+                * user space can't make reliable use of the CRCs, so let's just
+                * completely disable it.
+                */
+               hsw_disable_ips(crtc);
+
                spin_lock_irq(&pipe_crc->lock);
                pipe_crc->head = 0;
                pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                        vlv_undo_pipe_scramble_reset(dev, pipe);
                else if (IS_HASWELL(dev) && pipe == PIPE_A)
                        hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+               hsw_enable_ips(crtc);
        }
 
        return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
        .write = display_crc_ctl_write
 };
 
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
        struct drm_device *dev = m->private;
        int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];
 
-               /* WM1+ latency values in 0.5us units */
-               if (level > 0)
+               /*
+                * - WM1+ latency values in 0.5us units
+                * - latencies are in us on gen9
+                */
+               if (INTEL_INFO(dev)->gen >= 9)
+                       latency *= 10;
+               else if (level > 0)
                        latency *= 5;
 
                seq_printf(m, "WM%d %u (%u.%u usec)\n",
-                          level, wm[level],
-                          latency / 10, latency % 10);
+                          level, wm[level], latency / 10, latency % 10);
        }
 
        drm_modeset_unlock_all(dev);
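
wm_latency_show now copes with gen9, where the stored latencies are whole microseconds, by scaling everything to the same tenths-of-a-microsecond display unit; pre-gen9 WM1+ values are stored in 0.5 us steps. The arithmetic, standalone (the WM0 unit on pre-gen9 is my reading of the division by 10, flagged as an assumption):

#include <stdio.h>
#include <stdint.h>

static void show_level(int gen, int level, uint16_t raw)
{
        unsigned int latency = raw;

        if (gen >= 9)
                latency *= 10;      /* gen9 stores whole microseconds */
        else if (level > 0)
                latency *= 5;       /* WM1+ stored in 0.5 us units */
        /* else: WM0 apparently already in 0.1 us units (assumption) */

        printf("WM%d %u (%u.%u usec)\n", level, raw,
               latency / 10, latency % 10);
}

int main(void)
{
        show_level(9, 0, 2);  /* -> WM0 2 (2.0 usec) */
        show_level(7, 1, 4);  /* -> WM1 4 (2.0 usec) */
        return 0;
}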
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const uint16_t *latencies;
+
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.pri_latency;
 
-       wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+       wm_latency_show(m, latencies);
 
        return 0;
 }
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const uint16_t *latencies;
+
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.spr_latency;
 
-       wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+       wm_latency_show(m, latencies);
 
        return 0;
 }
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const uint16_t *latencies;
+
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.cur_latency;
 
-       wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+       wm_latency_show(m, latencies);
 
        return 0;
 }
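
All three *_wm_latency_show helpers now pick their table the same way: on gen >= 9 they point at the shared skl_latency array, otherwise at the per-plane pri/spr/cur arrays. A compressed sketch of that selection (struct layout and the 'which' selector are invented for the demo):

#include <stdio.h>
#include <stdint.h>

struct wm_tables {
        uint16_t skl_latency[8];
        uint16_t pri_latency[5], spr_latency[5], cur_latency[5];
};

/* Pick the table to walk; 'which' selects pri/spr/cur on pre-gen9. */
static const uint16_t *pick_latencies(const struct wm_tables *wm,
                                      int gen, char which)
{
        if (gen >= 9)
                return wm->skl_latency;
        switch (which) {
        case 's': return wm->spr_latency;
        case 'c': return wm->cur_latency;
        default:  return wm->pri_latency;
        }
}

int main(void)
{
        struct wm_tables wm = { .skl_latency = { 2, 4, 6 },
                                .pri_latency = { 7 } };

        printf("gen9 WM0 = %u, gen7 WM0 = %u\n",
               pick_latencies(&wm, 9, 'p')[0],
               pick_latencies(&wm, 7, 'p')[0]);
        return 0;
}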
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
-                               size_t len, loff_t *offp, uint16_t wm[5])
+                               size_t len, loff_t *offp, uint16_t wm[8])
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       uint16_t new[5] = { 0 };
+       uint16_t new[8] = { 0 };
        int num_levels = ilk_wm_max_level(dev) + 1;
        int level;
        int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 
        tmp[len] = '\0';
 
-       ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+       ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+                    &new[0], &new[1], &new[2], &new[3],
+                    &new[4], &new[5], &new[6], &new[7]);
        if (ret != num_levels)
                return -EINVAL;
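
wm_latency_write is widened from five to eight levels to match the larger gen9 tables, and sscanf's return value (the number of fields converted) is checked against the platform's actual level count, so short or junk input is rejected with -EINVAL. Standalone:

#include <stdio.h>
#include <string.h>

#define EINVAL 22

/* Parse up to 8 whitespace-separated latencies; require exactly
 * num_levels conversions, as the hunk above does. */
static int parse_latencies(const char *buf, unsigned short new[8],
                           int num_levels)
{
        int ret = sscanf(buf, "%hu %hu %hu %hu %hu %hu %hu %hu",
                         &new[0], &new[1], &new[2], &new[3],
                         &new[4], &new[5], &new[6], &new[7]);

        return (ret != num_levels) ? -EINVAL : 0;
}

int main(void)
{
        unsigned short new[8];
        memset(new, 0, sizeof(new));

        printf("3 levels, 3 given: %d\n", parse_latencies("2 4 6", new, 3));
        printf("8 levels, 3 given: %d\n", parse_latencies("2 4 6", new, 8));
        return 0;
}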
 
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint16_t *latencies;
 
-       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.pri_latency;
+
+       return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint16_t *latencies;
 
-       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.spr_latency;
+
+       return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint16_t *latencies;
+
+       if (INTEL_INFO(dev)->gen >= 9)
+               latencies = dev_priv->wm.skl_latency;
+       else
+               latencies = to_i915(dev)->wm.cur_latency;
 
-       return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+       return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
        {"i915_wa_registers", i915_wa_registers, 0},
+       {"i915_ddb_info", i915_ddb_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
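
The new i915_ddb_info file is registered by appending one row to i915_debugfs_list; because the table is sized with ARRAY_SIZE, no count has to be maintained by hand. The registration idiom, reduced to a standalone name/function table (entry names reused from the diff, handlers are stubs):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct info_entry {
        const char *name;
        int (*show)(void);
};

static int ddb_info(void)     { puts("ddb info");     return 0; }
static int wa_registers(void) { puts("wa registers"); return 0; }

static const struct info_entry debugfs_list[] = {
        { "i915_wa_registers", wa_registers },
        { "i915_ddb_info", ddb_info },  /* new entry: just add a row */
};

int main(void)
{
        for (size_t i = 0; i < ARRAY_SIZE(debugfs_list); i++)
                debugfs_list[i].show();
        return 0;
}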
 
index 1403b01e821695cce6c42cb8b3704eb284aba17e..ecee3bcc8772907850a536166851624ab4795800 100644
 #include <linux/pm_runtime.h>
 #include <linux/oom.h>
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
-       intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-       intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-       __intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                     \
-       if (LP_RING(dev->dev_private)->buffer->obj == NULL)                     \
-               LOCK_TEST_WITH_RETURN(dev, file);                       \
-} while (0)
-
-static inline u32
-intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
-{
-       if (I915_NEED_GFX_HWS(dev_priv->dev))
-               return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
-       else
-               return intel_read_status_page(LP_RING(dev_priv), reg);
-}
-
-#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_BREADCRUMB_INDEX          0x21
-
-void i915_update_dri1_breadcrumb(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
-
-       /*
-        * The dri breadcrumb update races against the drm master disappearing.
-        * Instead of trying to fix this (this is by far not the only ums issue)
-        * just don't do the update in kms mode.
-        */
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       if (dev->primary->master) {
-               master_priv = dev->primary->master->driver_priv;
-               if (master_priv->sarea_priv)
-                       master_priv->sarea_priv->last_dispatch =
-                               READ_BREADCRUMB(dev_priv);
-       }
-}
-
-static void i915_write_hws_pga(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 addr;
-
-       addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(dev)->gen >= 4)
-               addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-       I915_WRITE(HWS_PGA, addr);
-}
-
-/**
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
-static void i915_free_hws(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-       if (dev_priv->status_page_dmah) {
-               drm_pci_free(dev, dev_priv->status_page_dmah);
-               dev_priv->status_page_dmah = NULL;
-       }
-
-       if (ring->status_page.gfx_addr) {
-               ring->status_page.gfx_addr = 0;
-               iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
-       }
-
-       /* Need to rewrite hardware status page */
-       I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-void i915_kernel_lost_context(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
-       struct intel_engine_cs *ring = LP_RING(dev_priv);
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-
-       /*
-        * We should never lose context on the ring with modesetting
-        * as we don't expose it to userspace
-        */
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-       ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-       ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
-       if (ringbuf->space < 0)
-               ringbuf->space += ringbuf->size;
-
-       if (!dev->primary->master)
-               return;
-
-       master_priv = dev->primary->master->driver_priv;
-       if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
-               master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
-static int i915_dma_cleanup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-
-       /* Make sure interrupts are disabled here because the uninstall ioctl
-        * may not have been called from userspace and after dev_private
-        * is freed, it's too late.
-        */
-       if (dev->irq_enabled)
-               drm_irq_uninstall(dev);
-
-       mutex_lock(&dev->struct_mutex);
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               intel_cleanup_ring_buffer(&dev_priv->ring[i]);
-       mutex_unlock(&dev->struct_mutex);
-
-       /* Clear the HWS virtual address at teardown */
-       if (I915_NEED_GFX_HWS(dev))
-               i915_free_hws(dev);
-
-       return 0;
-}
-
-static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       int ret;
-
-       master_priv->sarea = drm_legacy_getsarea(dev);
-       if (master_priv->sarea) {
-               master_priv->sarea_priv = (drm_i915_sarea_t *)
-                       ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
-       } else {
-               DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
-       }
-
-       if (init->ring_size != 0) {
-               if (LP_RING(dev_priv)->buffer->obj != NULL) {
-                       i915_dma_cleanup(dev);
-                       DRM_ERROR("Client tried to initialize ringbuffer in "
-                                 "GEM mode\n");
-                       return -EINVAL;
-               }
-
-               ret = intel_render_ring_init_dri(dev,
-                                                init->ring_start,
-                                                init->ring_size);
-               if (ret) {
-                       i915_dma_cleanup(dev);
-                       return ret;
-               }
-       }
-
-       dev_priv->dri1.cpp = init->cpp;
-       dev_priv->dri1.back_offset = init->back_offset;
-       dev_priv->dri1.front_offset = init->front_offset;
-       dev_priv->dri1.current_page = 0;
-       if (master_priv->sarea_priv)
-               master_priv->sarea_priv->pf_current_page = 0;
-
-       /* Allow hardware batchbuffers unless told otherwise.
-        */
-       dev_priv->dri1.allow_batchbuffer = 1;
-
-       return 0;
-}
-
-static int i915_dma_resume(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-       DRM_DEBUG_DRIVER("%s\n", __func__);
-
-       if (ring->buffer->virtual_start == NULL) {
-               DRM_ERROR("can not ioremap virtual address for"
-                         " ring buffer\n");
-               return -ENOMEM;
-       }
-
-       /* Program Hardware Status Page */
-       if (!ring->status_page.page_addr) {
-               DRM_ERROR("Can not find hardware status page\n");
-               return -EINVAL;
-       }
-       DRM_DEBUG_DRIVER("hw status page @ %p\n",
-                               ring->status_page.page_addr);
-       if (ring->status_page.gfx_addr != 0)
-               intel_ring_setup_status_page(ring);
-       else
-               i915_write_hws_pga(dev);
-
-       DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
-       return 0;
-}
-
-static int i915_dma_init(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       drm_i915_init_t *init = data;
-       int retcode = 0;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       switch (init->func) {
-       case I915_INIT_DMA:
-               retcode = i915_initialize(dev, init);
-               break;
-       case I915_CLEANUP_DMA:
-               retcode = i915_dma_cleanup(dev);
-               break;
-       case I915_RESUME_DMA:
-               retcode = i915_dma_resume(dev);
-               break;
-       default:
-               retcode = -EINVAL;
-               break;
-       }
-
-       return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction.  It's important to get the size right as
- * that tells us where the next instruction to check is.  Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int validate_cmd(int cmd)
-{
-       switch (((cmd >> 29) & 0x7)) {
-       case 0x0:
-               switch ((cmd >> 23) & 0x3f) {
-               case 0x0:
-                       return 1;       /* MI_NOOP */
-               case 0x4:
-                       return 1;       /* MI_FLUSH */
-               default:
-                       return 0;       /* disallow everything else */
-               }
-               break;
-       case 0x1:
-               return 0;       /* reserved */
-       case 0x2:
-               return (cmd & 0xff) + 2;        /* 2d commands */
-       case 0x3:
-               if (((cmd >> 24) & 0x1f) <= 0x18)
-                       return 1;
-
-               switch ((cmd >> 24) & 0x1f) {
-               case 0x1c:
-                       return 1;
-               case 0x1d:
-                       switch ((cmd >> 16) & 0xff) {
-                       case 0x3:
-                               return (cmd & 0x1f) + 2;
-                       case 0x4:
-                               return (cmd & 0xf) + 2;
-                       default:
-                               return (cmd & 0xffff) + 2;
-                       }
-               case 0x1e:
-                       if (cmd & (1 << 23))
-                               return (cmd & 0xffff) + 1;
-                       else
-                               return 1;
-               case 0x1f:
-                       if ((cmd & (1 << 23)) == 0)     /* inline vertices */
-                               return (cmd & 0x1ffff) + 2;
-                       else if (cmd & (1 << 17))       /* indirect random */
-                               if ((cmd & 0xffff) == 0)
-                                       return 0;       /* unknown length, too hard */
-                               else
-                                       return (((cmd & 0xffff) + 1) / 2) + 1;
-                       else
-                               return 2;       /* indirect sequential */
-               default:
-                       return 0;
-               }
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
-static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i, ret;
-
-       if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
-               return -EINVAL;
-
-       for (i = 0; i < dwords;) {
-               int sz = validate_cmd(buffer[i]);
-
-               if (sz == 0 || i + sz > dwords)
-                       return -EINVAL;
-               i += sz;
-       }
-
-       ret = BEGIN_LP_RING((dwords+1)&~1);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < dwords; i++)
-               OUT_RING(buffer[i]);
-       if (dwords & 1)
-               OUT_RING(0);
-
-       ADVANCE_LP_RING();
-
-       return 0;
-}
-
-int
-i915_emit_box(struct drm_device *dev,
-             struct drm_clip_rect *box,
-             int DR1, int DR4)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
-           box->y2 <= 0 || box->x2 <= 0) {
-               DRM_ERROR("Bad box %d,%d..%d,%d\n",
-                         box->x1, box->y1, box->x2, box->y2);
-               return -EINVAL;
-       }
-
-       if (INTEL_INFO(dev)->gen >= 4) {
-               ret = BEGIN_LP_RING(4);
-               if (ret)
-                       return ret;
-
-               OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-               OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-               OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-               OUT_RING(DR4);
-       } else {
-               ret = BEGIN_LP_RING(6);
-               if (ret)
-                       return ret;
-
-               OUT_RING(GFX_OP_DRAWRECT_INFO);
-               OUT_RING(DR1);
-               OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-               OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-               OUT_RING(DR4);
-               OUT_RING(0);
-       }
-       ADVANCE_LP_RING();
-
-       return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-static void i915_emit_breadcrumb(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
-       dev_priv->dri1.counter++;
-       if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
-               dev_priv->dri1.counter = 0;
-       if (master_priv->sarea_priv)
-               master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
-       if (BEGIN_LP_RING(4) == 0) {
-               OUT_RING(MI_STORE_DWORD_INDEX);
-               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->dri1.counter);
-               OUT_RING(0);
-               ADVANCE_LP_RING();
-       }
-}
-
-static int i915_dispatch_cmdbuffer(struct drm_device *dev,
-                                  drm_i915_cmdbuffer_t *cmd,
-                                  struct drm_clip_rect *cliprects,
-                                  void *cmdbuf)
-{
-       int nbox = cmd->num_cliprects;
-       int i = 0, count, ret;
-
-       if (cmd->sz & 0x3) {
-               DRM_ERROR("alignment");
-               return -EINVAL;
-       }
-
-       i915_kernel_lost_context(dev);
-
-       count = nbox ? nbox : 1;
-
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       ret = i915_emit_box(dev, &cliprects[i],
-                                           cmd->DR1, cmd->DR4);
-                       if (ret)
-                               return ret;
-               }
-
-               ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
-               if (ret)
-                       return ret;
-       }
-
-       i915_emit_breadcrumb(dev);
-       return 0;
-}
-
-static int i915_dispatch_batchbuffer(struct drm_device *dev,
-                                    drm_i915_batchbuffer_t *batch,
-                                    struct drm_clip_rect *cliprects)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int nbox = batch->num_cliprects;
-       int i, count, ret;
-
-       if ((batch->start | batch->used) & 0x7) {
-               DRM_ERROR("alignment");
-               return -EINVAL;
-       }
-
-       i915_kernel_lost_context(dev);
-
-       count = nbox ? nbox : 1;
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       ret = i915_emit_box(dev, &cliprects[i],
-                                           batch->DR1, batch->DR4);
-                       if (ret)
-                               return ret;
-               }
-
-               if (!IS_I830(dev) && !IS_845G(dev)) {
-                       ret = BEGIN_LP_RING(2);
-                       if (ret)
-                               return ret;
-
-                       if (INTEL_INFO(dev)->gen >= 4) {
-                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
-                               OUT_RING(batch->start);
-                       } else {
-                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
-                               OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-                       }
-               } else {
-                       ret = BEGIN_LP_RING(4);
-                       if (ret)
-                               return ret;
-
-                       OUT_RING(MI_BATCH_BUFFER);
-                       OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-                       OUT_RING(batch->start + batch->used - 4);
-                       OUT_RING(0);
-               }
-               ADVANCE_LP_RING();
-       }
-
-
-       if (IS_G4X(dev) || IS_GEN5(dev)) {
-               if (BEGIN_LP_RING(2) == 0) {
-                       OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-                       OUT_RING(MI_NOOP);
-                       ADVANCE_LP_RING();
-               }
-       }
-
-       i915_emit_breadcrumb(dev);
-       return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv =
-               dev->primary->master->driver_priv;
-       int ret;
-
-       if (!master_priv->sarea_priv)
-               return -EINVAL;
-
-       DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
-                         __func__,
-                        dev_priv->dri1.current_page,
-                        master_priv->sarea_priv->pf_current_page);
-
-       i915_kernel_lost_context(dev);
-
-       ret = BEGIN_LP_RING(10);
-       if (ret)
-               return ret;
-
-       OUT_RING(MI_FLUSH | MI_READ_FLUSH);
-       OUT_RING(0);
-
-       OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
-       OUT_RING(0);
-       if (dev_priv->dri1.current_page == 0) {
-               OUT_RING(dev_priv->dri1.back_offset);
-               dev_priv->dri1.current_page = 1;
-       } else {
-               OUT_RING(dev_priv->dri1.front_offset);
-               dev_priv->dri1.current_page = 0;
-       }
-       OUT_RING(0);
-
-       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
-       OUT_RING(0);
-
-       ADVANCE_LP_RING();
-
-       master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
-
-       if (BEGIN_LP_RING(4) == 0) {
-               OUT_RING(MI_STORE_DWORD_INDEX);
-               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->dri1.counter);
-               OUT_RING(0);
-               ADVANCE_LP_RING();
-       }
-
-       master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
-       return 0;
-}
-
-static int i915_quiescent(struct drm_device *dev)
-{
-       i915_kernel_lost_context(dev);
-       return intel_ring_idle(LP_RING(dev->dev_private));
-}
-
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
-{
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_quiescent(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-static int i915_batchbuffer(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
-       drm_i915_sarea_t *sarea_priv;
-       drm_i915_batchbuffer_t *batch = data;
-       int ret;
-       struct drm_clip_rect *cliprects = NULL;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       master_priv = dev->primary->master->driver_priv;
-       sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
-       if (!dev_priv->dri1.allow_batchbuffer) {
-               DRM_ERROR("Batchbuffer ioctl disabled\n");
-               return -EINVAL;
-       }
-
-       DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
-                       batch->start, batch->used, batch->num_cliprects);
-
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       if (batch->num_cliprects < 0)
-               return -EINVAL;
-
-       if (batch->num_cliprects) {
-               cliprects = kcalloc(batch->num_cliprects,
-                                   sizeof(*cliprects),
-                                   GFP_KERNEL);
-               if (cliprects == NULL)
-                       return -ENOMEM;
-
-               ret = copy_from_user(cliprects, batch->cliprects,
-                                    batch->num_cliprects *
-                                    sizeof(struct drm_clip_rect));
-               if (ret != 0) {
-                       ret = -EFAULT;
-                       goto fail_free;
-               }
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
-       mutex_unlock(&dev->struct_mutex);
-
-       if (sarea_priv)
-               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_free:
-       kfree(cliprects);
-
-       return ret;
-}
-
-static int i915_cmdbuffer(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
-       drm_i915_sarea_t *sarea_priv;
-       drm_i915_cmdbuffer_t *cmdbuf = data;
-       struct drm_clip_rect *cliprects = NULL;
-       void *batch_data;
-       int ret;
-
-       DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
-                       cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       master_priv = dev->primary->master->driver_priv;
-       sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       if (cmdbuf->num_cliprects < 0)
-               return -EINVAL;
-
-       batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
-       if (batch_data == NULL)
-               return -ENOMEM;
-
-       ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-       if (ret != 0) {
-               ret = -EFAULT;
-               goto fail_batch_free;
-       }
-
-       if (cmdbuf->num_cliprects) {
-               cliprects = kcalloc(cmdbuf->num_cliprects,
-                                   sizeof(*cliprects), GFP_KERNEL);
-               if (cliprects == NULL) {
-                       ret = -ENOMEM;
-                       goto fail_batch_free;
-               }
-
-               ret = copy_from_user(cliprects, cmdbuf->cliprects,
-                                    cmdbuf->num_cliprects *
-                                    sizeof(struct drm_clip_rect));
-               if (ret != 0) {
-                       ret = -EFAULT;
-                       goto fail_clip_free;
-               }
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
-       mutex_unlock(&dev->struct_mutex);
-       if (ret) {
-               DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-               goto fail_clip_free;
-       }
-
-       if (sarea_priv)
-               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_clip_free:
-       kfree(cliprects);
-fail_batch_free:
-       kfree(batch_data);
-
-       return ret;
-}
-
-static int i915_emit_irq(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
-       i915_kernel_lost_context(dev);
-
-       DRM_DEBUG_DRIVER("\n");
-
-       dev_priv->dri1.counter++;
-       if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
-               dev_priv->dri1.counter = 1;
-       if (master_priv->sarea_priv)
-               master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
-       if (BEGIN_LP_RING(4) == 0) {
-               OUT_RING(MI_STORE_DWORD_INDEX);
-               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->dri1.counter);
-               OUT_RING(MI_USER_INTERRUPT);
-               ADVANCE_LP_RING();
-       }
-
-       return dev_priv->dri1.counter;
-}
-
-static int i915_wait_irq(struct drm_device *dev, int irq_nr)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       int ret = 0;
-       struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-       DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
-                 READ_BREADCRUMB(dev_priv));
-
-       if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-               if (master_priv->sarea_priv)
-                       master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-               return 0;
-       }
-
-       if (master_priv->sarea_priv)
-               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
-       if (ring->irq_get(ring)) {
-               DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
-                           READ_BREADCRUMB(dev_priv) >= irq_nr);
-               ring->irq_put(ring);
-       } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
-               ret = -EBUSY;
-
-       if (ret == -EBUSY) {
-               DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-                         READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
-       }
-
-       return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-static int i915_irq_emit(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_irq_emit_t *emit = data;
-       int result;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       result = i915_emit_irq(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
-               DRM_ERROR("copy_to_user\n");
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-static int i915_irq_wait(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_irq_wait_t *irqwait = data;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
-static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_vblank_pipe_t *pipe = data;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
-       return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-static int i915_vblank_swap(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
-{
-       /* The delayed swap mechanism was fundamentally racy, and has been
-        * removed.  The model was that the client requested a delayed flip/swap
-        * from the kernel, then waited for vblank before continuing to perform
-        * rendering.  The problem was that the kernel might wake the client
-        * up before it dispatched the vblank swap (since the lock has to be
-        * held while touching the ringbuffer), in which case the client would
-        * clear and start the next frame before the swap occurred, and
-        * flicker would occur in addition to likely missing the vblank.
-        *
-        * In the absence of this ioctl, userland falls back to a correct path
-        * of waiting for a vblank, then dispatching the swap on its own.
-        * Context switching to userland and back is plenty fast enough for
-        * meeting the requirements of vblank swapping.
-        */
-       return -EINVAL;
-}
-
-static int i915_flip_bufs(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       DRM_DEBUG_DRIVER("%s\n", __func__);
-
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_flip(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
 
 static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
        drm_i915_getparam_t *param = data;
        int value;
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
-               value = dev->pdev->irq ? 1 : 0;
-               break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
-               value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
-               break;
        case I915_PARAM_LAST_DISPATCH:
-               value = READ_BREADCRUMB(dev_priv);
-               break;
+               /* Reject all old ums/dri params. */
+               return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
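
With the UMS/DRI1 paths gone, i915_getparam folds the three legacy parameters into one grouped case that returns -ENODEV, instead of computing per-parameter values. Case labels without an intervening break fall through to the shared statement; a minimal version (parameter numbers and the device id are invented):

#include <stdio.h>

#define ENODEV 19

enum {
        PARAM_IRQ_ACTIVE = 1,
        PARAM_ALLOW_BATCHBUFFER,
        PARAM_LAST_DISPATCH,
        PARAM_CHIPSET_ID,
};

static int getparam(int param, int *value)
{
        switch (param) {
        case PARAM_IRQ_ACTIVE:
        case PARAM_ALLOW_BATCHBUFFER:
        case PARAM_LAST_DISPATCH:
                /* Reject all old ums/dri params with one shared exit. */
                return -ENODEV;
        case PARAM_CHIPSET_ID:
                *value = 0x1234; /* stand-in device id */
                return 0;
        default:
                return -22; /* -EINVAL */
        }
}

int main(void)
{
        int v = 0;
        printf("legacy: %d, chipset: %d (v=0x%x)\n",
               getparam(PARAM_LAST_DISPATCH, &v),
               getparam(PARAM_CHIPSET_ID, &v), v);
        return 0;
}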
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+               value = 1;
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-               break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-               break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
-               dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
-               break;
+               /* Reject all old ums/dri params. */
+               return -ENODEV;
+
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int i915_set_status_page(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_hws_addr_t *hws = data;
-       struct intel_engine_cs *ring;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       if (!I915_NEED_GFX_HWS(dev))
-               return -EINVAL;
-
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               WARN(1, "tried to set status page when mode setting active\n");
-               return 0;
-       }
-
-       DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
-       ring = LP_RING(dev_priv);
-       ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
-       dev_priv->dri1.gfx_hws_cpu_addr =
-               ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
-       if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
-               i915_dma_cleanup(dev);
-               ring->status_page.gfx_addr = 0;
-               DRM_ERROR("can not ioremap virtual address for"
-                               " G33 hw status page\n");
-               return -ENOMEM;
-       }
-
-       memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
-       DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-                        ring->status_page.gfx_addr);
-       DRM_DEBUG_DRIVER("load hws at %p\n",
-                        ring->status_page.page_addr);
-       return 0;
-}
-
 static int i915_get_bridge_dev(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1275,12 +337,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
-               i915_resume(dev);
+               i915_resume_legacy(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               i915_suspend(dev, pmm);
+               i915_suspend_legacy(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
@@ -1338,14 +400,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_power_domains_init_hw(dev_priv);
 
-       /*
-        * We enable some interrupt sources in our postinstall hooks, so mark
-        * interrupts as enabled _before_ actually enabling them to avoid
-        * special cases in our ordering checks.
-        */
-       dev_priv->pm._irqs_disabled = false;
-
-       ret = drm_irq_install(dev, dev->pdev->irq);
+       ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_gem_stolen;
 
@@ -1370,7 +425,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
                goto cleanup_gem;
 
        /* Only enable hotplug handling once the fbdev is fully set up. */
-       intel_hpd_init(dev);
+       intel_hpd_init(dev_priv);
 
        /*
         * Some ports require correctly set-up hpd registers for detection to
@@ -1405,30 +460,6 @@ out:
        return ret;
 }
 
-int i915_master_create(struct drm_device *dev, struct drm_master *master)
-{
-       struct drm_i915_master_private *master_priv;
-
-       master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
-       if (!master_priv)
-               return -ENOMEM;
-
-       master->driver_priv = master_priv;
-       return 0;
-}
-
-void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
-{
-       struct drm_i915_master_private *master_priv = master->driver_priv;
-
-       if (!master_priv)
-               return;
-
-       kfree(master_priv);
-
-       master->driver_priv = NULL;
-}
-
 #if IS_ENABLED(CONFIG_FB)
 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
@@ -1534,7 +565,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
        info = (struct intel_device_info *)&dev_priv->info;
 
-       if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
@@ -1614,7 +645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
-       spin_lock_init(&dev_priv->backlight_lock);
+       mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1670,15 +701,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_regs;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_kick_out_vgacon(dev_priv);
+               /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+                * otherwise the vga fbdev driver falls over. */
+               ret = i915_kick_out_firmware_fb(dev_priv);
                if (ret) {
-                       DRM_ERROR("failed to remove conflicting VGA console\n");
+                       DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                        goto out_gtt;
                }
 
-               ret = i915_kick_out_firmware_fb(dev_priv);
+               ret = i915_kick_out_vgacon(dev_priv);
                if (ret) {
-                       DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+                       DRM_ERROR("failed to remove conflicting VGA console\n");
                        goto out_gtt;
                }
        }
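
This hunk reorders the load path so firmware framebuffer drivers are kicked out before the VGA console (the in-diff comment explains why), with both steps bailing to the same unwind label on failure. The kernel's goto-unwind error style, in miniature (stub functions stand in for the real kick-out and cleanup helpers):

#include <stdio.h>

static int kick_out_firmware_fb(void) { puts("fbdev gone");  return 0; }
static int kick_out_vgacon(void)      { puts("vgacon gone"); return 0; }
static void cleanup_gtt(void)         { puts("gtt cleaned"); }

static int load(void)
{
        int ret;

        /* Order matters: firmware fb first, then vgacon (see hunk above). */
        ret = kick_out_firmware_fb();
        if (ret)
                goto out_gtt;

        ret = kick_out_vgacon();
        if (ret)
                goto out_gtt;

        return 0;

out_gtt:
        cleanup_gtt(); /* single unwind point for everything set up so far */
        return ret;
}

int main(void) { return load(); }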
@@ -1740,7 +773,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_freewq;
        }
 
-       intel_irq_init(dev);
+       intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1782,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        DRM_ERROR("failed to init modeset\n");
                        goto out_power_well;
                }
-       } else {
-               /* Start out suspended in ums mode. */
-               dev_priv->ums.mm_suspended = 1;
        }
 
        i915_setup_sysfs(dev);
@@ -1798,12 +828,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);
 
-       intel_init_runtime_pm(dev_priv);
+       intel_runtime_pm_enable(dev_priv);
 
        return 0;
 
 out_power_well:
-       intel_power_domains_remove(dev_priv);
+       intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
 out_gem_unload:
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1846,16 +876,10 @@ int i915_driver_unload(struct drm_device *dev)
                return ret;
        }
 
-       intel_fini_runtime_pm(dev_priv);
+       intel_power_domains_fini(dev_priv);
 
        intel_gpu_ips_teardown();
 
-       /* The i915.ko module is still not prepared to be loaded when
-        * the power well is not enabled, so just enable it in case
-        * we're going to unload/reload. */
-       intel_display_set_init_power(dev_priv, true);
-       intel_power_domains_remove(dev_priv);
-
        i915_teardown_sysfs(dev);
 
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1866,8 +890,12 @@ int i915_driver_unload(struct drm_device *dev)
 
        acpi_video_unregister();
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
                intel_fbdev_fini(dev);
+
+       drm_vblank_cleanup(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_modeset_cleanup(dev);
 
                /*
@@ -1903,13 +931,8 @@ int i915_driver_unload(struct drm_device *dev)
                i915_gem_context_fini(dev);
                mutex_unlock(&dev->struct_mutex);
                i915_gem_cleanup_stolen(dev);
-
-               if (!I915_NEED_GFX_HWS(dev))
-                       i915_free_hws(dev);
        }
 
-       drm_vblank_cleanup(dev);
-
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
@@ -1957,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  */
 void i915_driver_lastclose(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* On gen6+ we refuse to init without kms enabled, but then the drm core
-        * goes right around and calls lastclose. Check for this and don't clean
-        * up anything. */
-       if (!dev_priv)
-               return;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               intel_fbdev_restore_mode(dev);
-               vga_switcheroo_process_delayed_switch();
-               return;
-       }
-
-       i915_gem_lastclose(dev);
-
-       i915_dma_cleanup(dev);
+       intel_fbdev_restore_mode(dev);
+       vga_switcheroo_process_delayed_switch();
 }
 
 void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1997,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 const struct drm_ioctl_desc i915_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2023,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
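
All of the handlers rerouted to drm_noop above belong to the retired DRI1/UMS interface; drm_noop is the DRM core's accept-and-ignore stub. A minimal sketch of the idea (the real helper lives in the DRM core and may also emit a debug trace):

/* Sketch of an accept-and-ignore handler in the DRM ioctl shape;
 * drm_noop plays this role for retired user-mode-setting ioctls. */
static int noop_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        return 0;       /* succeed without touching any state */
}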
index 055d5e7fbf12a14473c7b82f314630d1e5615b9c..1e9c136a874cf1741e09aecc33c6cea37e26e1d4 100644
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
        CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_info = {
+       .is_preliminary = 1,
+       .is_skylake = 1,
+       .gen = 9, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .has_llc = 1,
+       .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-       INTEL_CHV_IDS(&intel_cherryview_info)
+       INTEL_CHV_IDS(&intel_cherryview_info),  \
+       INTEL_SKL_IDS(&intel_skylake_info)
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
        INTEL_PCI_IDS,
@@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(IS_ULT(dev));
+                               WARN_ON(IS_HSW_ULT(dev));
                        } else if (IS_BROADWELL(dev)) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->pch_id =
@@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(!IS_ULT(dev));
+                               WARN_ON(!IS_HSW_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
+                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
                        } else
                                continue;
 
@@ -529,10 +551,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 }
 
 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
-                               bool rpm_resume);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+                             bool rpm_resume);
 
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
@@ -562,6 +584,8 @@ static int i915_drm_freeze(struct drm_device *dev)
                        return error;
                }
 
+               intel_suspend_gt_powersave(dev);
+
                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw. Also, power gate the CRTC power wells.
@@ -573,16 +597,12 @@ static int i915_drm_freeze(struct drm_device *dev)
 
                intel_dp_mst_suspend(dev);
 
-               flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-               intel_runtime_pm_disable_interrupts(dev);
+               intel_runtime_pm_disable_interrupts(dev_priv);
                intel_hpd_cancel_work(dev_priv);
 
                intel_suspend_encoders(dev_priv);
 
-               intel_suspend_gt_powersave(dev);
-
-               intel_modeset_suspend_hw(dev);
+               intel_suspend_hw(dev);
        }
 
        i915_gem_suspend_gtt_mappings(dev);
@@ -608,7 +628,26 @@ static int i915_drm_freeze(struct drm_device *dev)
        return 0;
 }
 
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       int ret;
+
+       ret = intel_suspend_complete(dev_priv);
+
+       if (ret) {
+               DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+               return ret;
+       }
+
+       pci_disable_device(drm_dev->pdev);
+       pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+       return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
 {
        int error;
 
@@ -618,48 +657,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
                return -ENODEV;
        }
 
-       if (state.event == PM_EVENT_PRETHAW)
-               return 0;
-
+       if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+                        state.event != PM_EVENT_FREEZE))
+               return -EINVAL;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       error = i915_drm_freeze(dev);
+       error = i915_drm_suspend(dev);
        if (error)
                return error;
 
-       if (state.event == PM_EVENT_SUSPEND) {
-               /* Shut down the device */
-               pci_disable_device(dev->pdev);
-               pci_set_power_state(dev->pdev, PCI_D3hot);
-       }
-
-       return 0;
-}
-
-static int i915_drm_thaw_early(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       ret = intel_resume_prepare(dev_priv, false);
-       if (ret)
-               DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
-
-       intel_uncore_early_sanitize(dev, true);
-       intel_uncore_sanitize(dev);
-       intel_power_domains_init_hw(dev_priv);
-
-       return ret;
+       return i915_drm_suspend_late(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
+static int i915_drm_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-           restore_gtt_mappings) {
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
@@ -680,17 +696,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                }
                mutex_unlock(&dev->struct_mutex);
 
-               intel_runtime_pm_restore_interrupts(dev);
+               /* We need working interrupts for modeset enabling ... */
+               intel_runtime_pm_enable_interrupts(dev_priv);
 
                intel_modeset_init_hw(dev);
 
-               {
-                       unsigned long irqflags;
-                       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-                       if (dev_priv->display.hpd_irq_setup)
-                               dev_priv->display.hpd_irq_setup(dev);
-                       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-               }
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display.hpd_irq_setup)
+                       dev_priv->display.hpd_irq_setup(dev);
+               spin_unlock_irq(&dev_priv->irq_lock);
 
                intel_dp_mst_resume(dev);
                drm_modeset_lock_all(dev);
@@ -703,7 +717,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
-               intel_hpd_init(dev);
+               intel_hpd_init(dev_priv);
                /* Config may have changed between suspend and resume */
                drm_helper_hpd_irq_event(dev);
        }
@@ -718,21 +732,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 
        intel_opregion_notify_adapter(dev, PCI_D0);
 
-       return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_check_and_clear_faults(dev);
+       drm_kms_helper_poll_enable(dev);
 
-       return __i915_drm_thaw(dev, true);
+       return 0;
 }
 
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
 {
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-               return 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = 0;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -748,33 +756,34 @@ static int i915_resume_early(struct drm_device *dev)
 
        pci_set_master(dev->pdev);
 
-       return i915_drm_thaw_early(dev);
+       if (IS_VALLEYVIEW(dev_priv))
+               ret = vlv_resume_prepare(dev_priv, false);
+       if (ret)
+               DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+
+       intel_uncore_early_sanitize(dev, true);
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               hsw_disable_pc8(dev_priv);
+
+       intel_uncore_sanitize(dev);
+       intel_power_domains_init_hw(dev_priv);
+
+       return ret;
 }
 
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       /*
-        * Platforms with opregion should have sane BIOS, older ones (gen3 and
-        * earlier) need to restore the GTT mappings since the BIOS might clear
-        * all our scratch PTEs.
-        */
-       ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       ret = i915_drm_resume_early(dev);
        if (ret)
                return ret;
 
-       drm_kms_helper_poll_enable(dev);
-       return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
-       i915_resume_early(dev);
-       i915_resume(dev);
-
-       return 0;
+       return i915_drm_resume(dev);
 }
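
The freeze/thaw naming is gone: both the legacy entry points above and the dev_pm_ops callbacks below funnel into the same four-phase suspend/resume pair. The old intel_resume_prepare() indirection is likewise replaced by open-coded platform checks at each call site; a sketch that consolidates those call sites (platform_resume_prepare is illustrative, not driver code):

/* Illustrative consolidation of the per-platform resume-prepare
 * dispatch now open-coded in i915_drm_resume_early() and
 * intel_runtime_resume(). */
static int platform_resume_prepare(struct drm_i915_private *dev_priv,
                                   bool rpm_resume)
{
        if (IS_GEN6(dev_priv) && rpm_resume)
                intel_init_pch_refclk(dev_priv->dev);   /* snb: refclk */
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);              /* leave PC8 */
        else if (IS_VALLEYVIEW(dev_priv))
                return vlv_resume_prepare(dev_priv, rpm_resume);

        return 0;
}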
 
 /**
@@ -820,6 +829,9 @@ int i915_reset(struct drm_device *dev)
                }
        }
 
+       if (i915_stop_ring_allow_warn(dev_priv))
+               pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
@@ -840,10 +852,7 @@ int i915_reset(struct drm_device *dev)
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
-       if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-                       !dev_priv->ums.mm_suspended) {
-               dev_priv->ums.mm_suspended = 0;
-
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
                dev_priv->gpu_error.reload_in_reset = true;
 
@@ -923,15 +932,13 @@ static int i915_pm_suspend(struct device *dev)
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_freeze(drm_dev);
+       return i915_drm_suspend(drm_dev);
 }
 
 static int i915_pm_suspend_late(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
-       int ret;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -945,16 +952,7 @@ static int i915_pm_suspend_late(struct device *dev)
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       ret = intel_suspend_complete(dev_priv);
-
-       if (ret)
-               DRM_ERROR("Suspend complete failed: %d\n", ret);
-       else {
-               pci_disable_device(pdev);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return ret;
+       return i915_drm_suspend_late(drm_dev);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -962,52 +960,21 @@ static int i915_pm_resume_early(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-       return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-       return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-       if (!drm_dev || !drm_dev->dev_private) {
-               dev_err(dev, "DRM not initialized, aborting suspend.\n");
-               return -ENODEV;
-       }
-
-       return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
 
-       return i915_drm_thaw_early(drm_dev);
+       return i915_drm_resume_early(drm_dev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-       return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
 
-       return i915_drm_freeze(drm_dev);
+       return i915_drm_resume(drm_dev);
 }
 
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1017,25 +984,6 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int snb_resume_prepare(struct drm_i915_private *dev_priv,
-                               bool rpm_resume)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       if (rpm_resume)
-               intel_init_pch_refclk(dev);
-
-       return 0;
-}
-
-static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
-                               bool rpm_resume)
-{
-       hsw_disable_pc8(dev_priv);
-
-       return 0;
-}
-
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1440,18 +1388,13 @@ static int intel_runtime_suspend(struct device *device)
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
-       /*
-        * rps.work can't be rearmed here, since we get here only after making
-        * sure the GPU is idle and the RPS freq is set to the minimum. See
-        * intel_mark_idle().
-        */
-       cancel_work_sync(&dev_priv->rps.work);
-       intel_runtime_pm_disable_interrupts(dev);
+       intel_suspend_gt_powersave(dev);
+       intel_runtime_pm_disable_interrupts(dev_priv);
 
        ret = intel_suspend_complete(dev_priv);
        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-               intel_runtime_pm_restore_interrupts(dev);
+               intel_runtime_pm_enable_interrupts(dev_priv);
 
                return ret;
        }
@@ -1493,7 +1436,7 @@ static int intel_runtime_resume(struct device *device)
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       int ret = 0;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
                return -ENODEV;
@@ -1503,7 +1446,13 @@ static int intel_runtime_resume(struct device *device)
        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
 
-       ret = intel_resume_prepare(dev_priv, true);
+       if (IS_GEN6(dev_priv))
+               intel_init_pch_refclk(dev);
+       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               hsw_disable_pc8(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv))
+               ret = vlv_resume_prepare(dev_priv, true);
+
        /*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
@@ -1511,8 +1460,8 @@ static int intel_runtime_resume(struct device *device)
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev);
 
-       intel_runtime_pm_restore_interrupts(dev);
-       intel_reset_gt_powersave(dev);
+       intel_runtime_pm_enable_interrupts(dev_priv);
+       intel_enable_gt_powersave(dev);
 
        if (ret)
                DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1541,40 +1490,41 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-/*
- * This function implements common functionality of runtime and system
- * resume sequence. Variable rpm_resume used for implementing different
- * code paths.
- */
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
-                               bool rpm_resume)
-{
-       struct drm_device *dev = dev_priv->dev;
-       int ret;
-
-       if (IS_GEN6(dev))
-               ret = snb_resume_prepare(dev_priv, rpm_resume);
-       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               ret = hsw_resume_prepare(dev_priv, rpm_resume);
-       else if (IS_VALLEYVIEW(dev))
-               ret = vlv_resume_prepare(dev_priv, rpm_resume);
-       else
-               ret = 0;
-
-       return ret;
-}
-
 static const struct dev_pm_ops i915_pm_ops = {
+       /*
+        * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+        * PMSG_RESUME]
+        */
        .suspend = i915_pm_suspend,
        .suspend_late = i915_pm_suspend_late,
        .resume_early = i915_pm_resume_early,
        .resume = i915_pm_resume,
-       .freeze = i915_pm_freeze,
-       .thaw_early = i915_pm_thaw_early,
-       .thaw = i915_pm_thaw,
-       .poweroff = i915_pm_poweroff,
+
+       /*
+        * S4 event handlers
+        * @freeze, @freeze_late    : called (1) before creating the
+        *                            hibernation image [PMSG_FREEZE] and
+        *                            (2) after rebooting, before restoring
+        *                            the image [PMSG_QUIESCE]
+        * @thaw, @thaw_early       : called (1) after creating the hibernation
+        *                            image, before writing it [PMSG_THAW]
+        *                            and (2) after failing to create or
+        *                            restore the image [PMSG_RECOVER]
+        * @poweroff, @poweroff_late: called after writing the hibernation
+        *                            image, before rebooting [PMSG_HIBERNATE]
+        * @restore, @restore_early : called after rebooting and restoring the
+        *                            hibernation image [PMSG_RESTORE]
+        */
+       .freeze = i915_pm_suspend,
+       .freeze_late = i915_pm_suspend_late,
+       .thaw_early = i915_pm_resume_early,
+       .thaw = i915_pm_resume,
+       .poweroff = i915_pm_suspend,
+       .poweroff_late = i915_pm_suspend_late,
        .restore_early = i915_pm_resume_early,
        .restore = i915_pm_resume,
+
+       /* S0ix (via runtime suspend) event handlers */
        .runtime_suspend = intel_runtime_suspend,
        .runtime_resume = intel_runtime_resume,
 };
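
As the comment block spells out, every S4 phase reuses the S3 handlers, so hibernation needs no i915-specific code of its own. The kernel's SET_SYSTEM_SLEEP_PM_OPS helper expresses the same reuse in one line; a hedged sketch for a hypothetical driver (the foo_* names are placeholders):

#include <linux/pm.h>

static int foo_suspend(struct device *dev);
static int foo_resume(struct device *dev);
static int foo_runtime_suspend(struct device *dev);
static int foo_runtime_resume(struct device *dev);

/* Hypothetical driver mapping one suspend/resume pair onto all of
 * suspend/freeze/poweroff and resume/thaw/restore, as i915_pm_ops
 * does explicitly above. */
static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};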
@@ -1616,12 +1566,10 @@ static struct drm_driver driver = {
        .set_busid = drm_pci_set_busid,
 
        /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-       .suspend = i915_suspend,
+       .suspend = i915_suspend_legacy,
        .resume = i915_resume_legacy,
 
        .device_is_agp = i915_driver_device_is_agp,
-       .master_create = i915_master_create,
-       .master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1635,7 +1583,7 @@ static struct drm_driver driver = {
        .gem_prime_import = i915_gem_prime_import,
 
        .dumb_create = i915_gem_dumb_create,
-       .dumb_map_offset = i915_gem_mmap_gtt,
+       .dumb_map_offset = i915_gem_dumb_map_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
index 16a6f6d187a193ca8cee8f9cae444a75aa7c4533..bb1892d72efefe14bf78ce4633405d51e2251b32 100644
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20140905"
+#define DRIVER_DATE            "20141121"
+
+#undef WARN_ON
+#define WARN_ON(x)             WARN(x, "WARN_ON(" #x ")")
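
The WARN_ON override stringizes its argument, so the firing condition itself ends up in the log instead of an anonymous backtrace. For example (illustrative):

/* Illustrative: with the override above, this check ... */
static void foo_check_pipe(enum pipe pipe)
{
        WARN_ON(pipe >= I915_MAX_PIPES);
        /* ... expands to roughly:
         *   WARN(pipe >= I915_MAX_PIPES,
         *        "WARN_ON(pipe >= I915_MAX_PIPES)");
         * so dmesg names the failed condition. */
}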
 
 enum pipe {
        INVALID_PIPE = -1,
@@ -76,6 +79,14 @@ enum transcoder {
 };
 #define transcoder_name(t) ((t) + 'A')
 
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES        3
+
 enum plane {
        PLANE_A = 0,
        PLANE_B,
@@ -202,10 +213,15 @@ enum intel_dpll_id {
        /* real shared dpll ids must be >= 0 */
        DPLL_ID_PCH_PLL_A = 0,
        DPLL_ID_PCH_PLL_B = 1,
+       /* hsw/bdw */
        DPLL_ID_WRPLL1 = 0,
        DPLL_ID_WRPLL2 = 1,
+       /* skl */
+       DPLL_ID_SKL_DPLL1 = 0,
+       DPLL_ID_SKL_DPLL2 = 1,
+       DPLL_ID_SKL_DPLL3 = 2,
 };
-#define I915_NUM_PLLS 2
+#define I915_NUM_PLLS 3
 
 struct intel_dpll_hw_state {
        /* i9xx, pch plls */
@@ -216,16 +232,33 @@ struct intel_dpll_hw_state {
 
        /* hsw, bdw */
        uint32_t wrpll;
+
+       /* skl */
+       /*
+        * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+        * lower part of ctrl1 and they get shifted into position when
+        * writing the register.  This allows us to easily compare the
+        * state when deciding whether to share a DPLL.
+        */
+       uint32_t ctrl1;
+       /* HDMI only, 0 when used for DP */
+       uint32_t cfgcr1, cfgcr2;
+};
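
The ctrl1 caching scheme described in the comment keeps each DPLL's six DPLL_CTRL1 bits at the low end of the cached word and shifts them into the per-DPLL slot only on write; an illustrative sketch of that merge (the helper name and mask derivation are assumptions, not the driver's actual code):

/* Illustrative: merge one DPLL's cached low bits into its 6-bit
 * slot of the DPLL_CTRL1 register value. */
static u32 skl_merge_ctrl1(u32 regval,
                           const struct intel_dpll_hw_state *hw,
                           enum intel_dpll_id id)
{
        u32 mask = 0x3f << (id * 6);    /* 6 bits per DPLL */

        return (regval & ~mask) | (hw->ctrl1 << (id * 6));
}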
+
+struct intel_shared_dpll_config {
+       unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+       struct intel_dpll_hw_state hw_state;
 };
 
 struct intel_shared_dpll {
-       int refcount; /* count of number of CRTCs sharing this PLL */
+       struct intel_shared_dpll_config config;
+       struct intel_shared_dpll_config *new_config;
+
        int active; /* count of number of active CRTCs (i.e. DPMS on) */
        bool on; /* is the PLL actually active? Disabled during modeset */
        const char *name;
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
-       struct intel_dpll_hw_state hw_state;
        /* The mode_set hook is optional and should be used together with the
         * intel_prepare_shared_dpll function. */
        void (*mode_set)(struct drm_i915_private *dev_priv,
@@ -239,6 +272,11 @@ struct intel_shared_dpll {
                             struct intel_dpll_hw_state *hw_state);
 };
 
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
 /* Used by dp and fdi links */
 struct intel_link_m_n {
        uint32_t        tu;
@@ -267,7 +305,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 #define DRIVER_PATCHLEVEL      0
 
 #define WATCH_LISTS    0
-#define WATCH_GTT      0
 
 struct opregion_header;
 struct opregion_acpi;
@@ -290,12 +327,6 @@ struct intel_opregion {
 struct intel_overlay;
 struct intel_overlay_error_state;
 
-struct drm_local_map;
-
-struct drm_i915_master_private {
-       struct drm_local_map *sarea;
-       struct _drm_i915_sarea *sarea_priv;
-};
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -426,6 +457,7 @@ struct drm_i915_error_state {
 };
 
 struct intel_connector;
+struct intel_encoder;
 struct intel_crtc_config;
 struct intel_plane_config;
 struct intel_crtc;
@@ -452,7 +484,7 @@ struct drm_i915_display_funcs {
         * Returns true on success, false on failure.
         */
        bool (*find_dpll)(const struct intel_limit *limit,
-                         struct drm_crtc *crtc,
+                         struct intel_crtc *crtc,
                          int target, int refclk,
                          struct dpll *match_clock,
                          struct dpll *best_clock);
@@ -468,15 +500,14 @@ struct drm_i915_display_funcs {
                                struct intel_crtc_config *);
        void (*get_plane_config)(struct intel_crtc *,
                                 struct intel_plane_config *);
-       int (*crtc_mode_set)(struct drm_crtc *crtc,
-                            int x, int y,
-                            struct drm_framebuffer *old_fb);
+       int (*crtc_compute_clock)(struct intel_crtc *crtc);
        void (*crtc_enable)(struct drm_crtc *crtc);
        void (*crtc_disable)(struct drm_crtc *crtc);
        void (*off)(struct drm_crtc *crtc);
-       void (*write_eld)(struct drm_connector *connector,
-                         struct drm_crtc *crtc,
-                         struct drm_display_mode *mode);
+       void (*audio_codec_enable)(struct drm_connector *connector,
+                                  struct intel_encoder *encoder,
+                                  struct drm_display_mode *mode);
+       void (*audio_codec_disable)(struct intel_encoder *encoder);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -494,7 +525,7 @@ struct drm_i915_display_funcs {
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 
-       int (*setup_backlight)(struct intel_connector *connector);
+       int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
        uint32_t (*get_backlight)(struct intel_connector *connector);
        void (*set_backlight)(struct intel_connector *connector,
                              uint32_t level);
@@ -533,6 +564,7 @@ struct intel_uncore {
 
        unsigned fw_rendercount;
        unsigned fw_mediacount;
+       unsigned fw_blittercount;
 
        struct timer_list force_wake_timer;
 };
@@ -551,6 +583,7 @@ struct intel_uncore {
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
+       func(is_skylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
        func(has_pipe_cxsr) sep \
@@ -646,6 +679,7 @@ struct intel_context {
        struct {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
+               int unpin_count;
        } engine[I915_NUM_RINGS];
 
        struct list_head link;
@@ -663,6 +697,18 @@ struct i915_fbc {
 
        bool false_color;
 
+       /* Tracks whether the HW is actually enabled, not whether the feature is
+        * possible. */
+       bool enabled;
+
+       /* On gen8 some rings cannot perform the fbc clean operation, so for
+        * now we do it in software via mmio.
+        * This variable carries information in the opposite direction to
+        * ring->fbc_dirty: it tells the frontbuffer tracking code to perform
+        * the cache clean on the software side.
+        */
+       bool need_sw_cache_clean;
+
        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
@@ -704,6 +750,7 @@ enum intel_pch {
        PCH_IBX,        /* Ibexpeak PCH */
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
+       PCH_SPT,        /* Sunrisepoint PCH */
        PCH_NOP,
 };
 
@@ -717,6 +764,7 @@ enum intel_sbi_destination {
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIPEB_FORCE (1<<4)
+#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -768,7 +816,6 @@ struct i915_suspend_saved_registers {
        u32 saveBLC_HIST_CTL;
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
-       u32 saveBLC_HIST_CTL_B;
        u32 saveBLC_CPU_PWM_CTL;
        u32 saveBLC_CPU_PWM_CTL2;
        u32 saveFPB0;
@@ -947,8 +994,12 @@ struct intel_rps_ei {
 };
 
 struct intel_gen6_power_mgmt {
-       /* work and pm_iir are protected by dev_priv->irq_lock */
+       /*
+        * work, interrupts_enabled and pm_iir are protected by
+        * dev_priv->irq_lock
+        */
        struct work_struct work;
+       bool interrupts_enabled;
        u32 pm_iir;
 
        /* Frequencies are stored in potentially platform dependent multiples.
@@ -1071,31 +1122,6 @@ struct i915_power_domains {
        struct i915_power_well *power_wells;
 };
 
-struct i915_dri1_state {
-       unsigned allow_batchbuffer : 1;
-       u32 __iomem *gfx_hws_cpu_addr;
-
-       unsigned int cpp;
-       int back_offset;
-       int front_offset;
-       int current_page;
-       int page_flipping;
-
-       uint32_t counter;
-};
-
-struct i915_ums_state {
-       /**
-        * Flag if the X Server, and thus DRM, is not currently in
-        * control of the device.
-        *
-        * This is set between LeaveVT and EnterVT.  It needs to be
-        * replaced with a semaphore.  It also needs to be
-        * transitioned away from for kernel modesetting.
-        */
-       int mm_suspended;
-};
-
 #define MAX_L3_SLICES 2
 struct intel_l3_parity {
        u32 *remap_info[MAX_L3_SLICES];
@@ -1357,6 +1383,49 @@ struct ilk_wm_values {
        enum intel_ddb_partitioning partitioning;
 };
 
+struct skl_ddb_entry {
+       uint16_t start, end;    /* in number of blocks, 'end' is exclusive */
+};
+
+static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+       return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+                                      const struct skl_ddb_entry *e2)
+{
+       if (e1->start == e2->start && e1->end == e2->end)
+               return true;
+
+       return false;
+}
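
With 'end' exclusive, DDB bookkeeping reduces to half-open-interval arithmetic: size is end - start, equality is member-wise, and overlap follows the usual rule. An illustrative companion helper in the same style (not part of the patch):

/* Illustrative: two half-open [start, end) DDB ranges overlap iff
 * each starts before the other ends. */
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
                                           const struct skl_ddb_entry *b)
{
        return a->start < b->end && b->start < a->end;
}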
+
+struct skl_ddb_allocation {
+       struct skl_ddb_entry pipe[I915_MAX_PIPES];
+       struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+       struct skl_ddb_entry cursor[I915_MAX_PIPES];
+};
+
+struct skl_wm_values {
+       bool dirty[I915_MAX_PIPES];
+       struct skl_ddb_allocation ddb;
+       uint32_t wm_linetime[I915_MAX_PIPES];
+       uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
+       uint32_t cursor[I915_MAX_PIPES][8];
+       uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
+       uint32_t cursor_trans[I915_MAX_PIPES];
+};
+
+struct skl_wm_level {
+       bool plane_en[I915_MAX_PLANES];
+       bool cursor_en;
+       uint16_t plane_res_b[I915_MAX_PLANES];
+       uint8_t plane_res_l[I915_MAX_PLANES];
+       uint16_t cursor_res_b;
+       uint8_t cursor_res_l;
+};
+
 /*
  * This struct helps tracking the state needed for runtime PM, which puts the
  * device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1369,7 +1438,7 @@ struct ilk_wm_values {
  *
  * Our driver uses the autosuspend delay feature, which means we'll only really
  * suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
  * it can be changed with the standard runtime PM files from sysfs.
  *
  * The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1451,7 @@ struct ilk_wm_values {
  */
 struct i915_runtime_pm {
        bool suspended;
-       bool _irqs_disabled;
+       bool irqs_enabled;
 };
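
The autosuspend behaviour referenced above is the standard runtime-PM pattern: the device only really suspends after its usage count has stayed at zero for the configured delay. A hedged sketch of the generic usage (foo_access_hw is a placeholder):

#include <linux/pm_runtime.h>

static void foo_access_hw(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* wake device, take a reference */
        /* ... touch registers ... */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev); /* drop reference, arm the delay */
}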
 
 enum intel_pipe_crc_source {
@@ -1426,6 +1495,20 @@ struct i915_frontbuffer_tracking {
        unsigned flip_bits;
 };
 
+struct i915_wa_reg {
+       u32 addr;
+       u32 value;
+       /* bitmask representing WA bits */
+       u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+       struct i915_wa_reg reg[I915_MAX_WA_REGS];
+       u32 count;
+};
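
A fixed-size table like this is typically filled by a small append helper that refuses to overflow; a plausible sketch (wa_add is illustrative, the driver's actual recording helpers live elsewhere):

/* Illustrative: append one workaround register to the table. */
static int wa_add(struct drm_i915_private *dev_priv,
                  u32 addr, u32 mask, u32 value)
{
        struct i915_workarounds *wa = &dev_priv->workarounds;

        if (WARN_ON(wa->count >= I915_MAX_WA_REGS))
                return -ENOSPC;

        wa->reg[wa->count].addr = addr;
        wa->reg[wa->count].mask = mask;
        wa->reg[wa->count].value = value;
        wa->count++;

        return 0;
}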
+
 struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
@@ -1505,11 +1588,13 @@ struct drm_i915_private {
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
 
+       bool preserve_bios_swizzle;
+
        /* overlay */
        struct intel_overlay *overlay;
 
        /* backlight registers and fields in struct intel_panel */
-       spinlock_t backlight_lock;
+       struct mutex backlight_lock;
 
        /* LVDS info */
        bool no_aux_handshake;
@@ -1523,6 +1608,7 @@ struct drm_i915_private {
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int vlv_cdclk_freq;
+       unsigned int hpll_freq;
 
        /**
         * wq - Driver workqueue for GEM.
@@ -1568,19 +1654,7 @@ struct drm_i915_private {
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-       /*
-        * workarounds are currently applied at different places and
-        * changes are being done to consolidate them so exact count is
-        * not clear at this point, use a max value for now.
-        */
-#define I915_MAX_WA_REGS  16
-       struct {
-               u32 addr;
-               u32 value;
-               /* bitmask representing WA bits */
-               u32 mask;
-       } intel_wa_regs[I915_MAX_WA_REGS];
-       u32 num_wa_regs;
+       struct i915_workarounds workarounds;
 
        /* Reclocking support */
        bool render_reclock_avail;
@@ -1644,9 +1718,25 @@ struct drm_i915_private {
                uint16_t spr_latency[5];
                /* cursor */
                uint16_t cur_latency[5];
+               /*
+                * Raw watermark memory latency values
+                * for SKL for all 8 levels
+                * in 1us units.
+                */
+               uint16_t skl_latency[8];
+
+               /*
+                * The skl_wm_values structure is a bit too big for stack
+                * allocation, so we keep the staging struct where we store
+                * intermediate results here instead.
+                */
+               struct skl_wm_values skl_results;
 
                /* current hardware state */
-               struct ilk_wm_values hw;
+               union {
+                       struct ilk_wm_values hw;
+                       struct skl_wm_values skl_hw;
+               };
        } wm;
 
        struct i915_runtime_pm pm;
@@ -1667,12 +1757,6 @@ struct drm_i915_private {
 
        uint32_t bios_vgacntr;
 
-       /* Old dri1 support infrastructure, beware the dragons ya fools entering
-        * here! */
-       struct i915_dri1_state dri1;
-       /* Old ums support infrastructure, same warning applies. */
-       struct i915_ums_state ums;
-
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1830,8 +1914,6 @@ struct drm_i915_gem_object {
        unsigned long gt_ro:1;
        unsigned int cache_level:3;
 
-       unsigned int has_aliasing_ppgtt_mapping:1;
-       unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;
 
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -1864,10 +1946,10 @@ struct drm_i915_gem_object {
        unsigned long user_pin_count;
        struct drm_file *pin_filp;
 
-       /** for phy allocated objects */
-       struct drm_dma_handle *phys_handle;
-
        union {
+               /** for phy allocated objects */
+               struct drm_dma_handle *phys_handle;
+
                struct i915_gem_userptr {
                        uintptr_t ptr;
                        unsigned read_only :1;
@@ -2073,6 +2155,7 @@ struct drm_i915_cmd_table {
 #define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)      (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev)        (INTEL_INFO(dev)->is_skylake)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,9 +2163,10 @@ struct drm_i915_cmd_table {
                                 ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
                                 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
+                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
@@ -2103,6 +2187,7 @@ struct drm_i915_cmd_table {
 #define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)   (INTEL_INFO(dev)->gen == 7)
 #define IS_GEN8(dev)   (INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev)   (INTEL_INFO(dev)->gen == 9)
 
 #define RENDER_RING            (1<<RCS)
 #define BSD_RING               (1<<VCS)
@@ -2115,13 +2200,11 @@ struct drm_i915_cmd_table {
 #define HAS_VEBOX(dev)         (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)           (INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-                                to_i915(dev)->ellc_size)
+                                __I915__(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)                (i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt == 2)
 
@@ -2154,13 +2237,15 @@ struct drm_i915_cmd_table {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)           (IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev)           (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)           (IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_RUNTIME_PM(dev)    (IS_GEN6(dev) || IS_HASWELL(dev) || \
                                 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev)          (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -2168,8 +2253,11 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE           0x1e00
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE           0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
 
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2189,8 +2277,8 @@ struct drm_i915_cmd_table {
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -2227,8 +2315,6 @@ struct i915_params {
 extern struct i915_params i915 __read_mostly;
 
                                /* i915_dma.c */
-void i915_update_dri1_breadcrumb(struct drm_device *dev);
-extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2242,9 +2328,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
 #endif
-extern int i915_emit_box(struct drm_device *dev,
-                        struct drm_clip_rect *box,
-                        int DR1, int DR4);
 extern int intel_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2260,10 +2343,10 @@ __printf(3, 4)
 void i915_handle_error(struct drm_device *dev, bool wedged,
                       const char *fmt, ...);
 
-void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
-                                                       int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2283,10 +2366,19 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+                                 uint32_t interrupt_mask,
+                                 uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), 0)
 
 /* i915_gem.c */
-int i915_gem_init_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2333,10 +2425,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
-int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv);
-int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv);
 int i915_gem_set_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2379,7 +2467,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_lastclose(struct drm_device *dev);
 
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    int *needs_clflush);
@@ -2413,8 +2500,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-                     uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_map_offset(struct drm_file *file_priv,
+                            struct drm_device *dev, uint32_t handle,
+                            uint64_t *offset);
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -2486,6 +2574,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
                       u32 *seqno);
 #define i915_add_request(ring, seqno) \
        __i915_add_request(ring, NULL, NULL, seqno)
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+                       unsigned reset_counter,
+                       bool interruptible,
+                       s64 *timeout,
+                       struct drm_i915_file_private *file_priv);
 int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
                                 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2755,7 +2848,6 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
-struct intel_encoder;
 #ifdef CONFIG_ACPI
 extern int intel_opregion_setup(struct drm_device *dev);
 extern void intel_opregion_init(struct drm_device *dev);
@@ -2793,7 +2885,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2895,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
@@ -2842,8 +2933,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
@@ -2873,7 +2964,9 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
 #define FORCEWAKE_RENDER       (1 << 0)
 #define FORCEWAKE_MEDIA                (1 << 1)
-#define FORCEWAKE_ALL          (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+#define FORCEWAKE_BLITTER      (1 << 2)
+#define FORCEWAKE_ALL          (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
+                                       FORCEWAKE_BLITTER)
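
FORCEWAKE_ALL is now the union of three wells, so callers can hold just the one they need; an illustrative fragment using the accessors declared earlier (foo_touch_blitter is a placeholder):

/* Illustrative: wake only the blitter well around a blitter access. */
static void foo_touch_blitter(struct drm_i915_private *dev_priv)
{
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_BLITTER);
        /* ... blitter register reads/writes ... */
        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_BLITTER);
}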
 
 
 #define I915_READ8(reg)                dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
index 28f91df2604db0bfb867a548e701a5e9a48b7fa3..d2ba315f4c92c846f29fa7e8d00782fdb66e5b91 100644
@@ -159,33 +159,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
        return i915_gem_obj_bound_any(obj) && !obj->active;
 }
 
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
-                   struct drm_file *file)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_init *args = data;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       if (args->gtt_start >= args->gtt_end ||
-           (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       /* GEM with user mode setting was never supported on ilk and later. */
-       if (INTEL_INFO(dev)->gen >= 5)
-               return -ENODEV;
-
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
-                                 args->gtt_end);
-       dev_priv->gtt.mappable_end = args->gtt_end;
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-       drm_dma_handle_t *phys = obj->phys_handle;
+       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+       char *vaddr = obj->phys_handle->vaddr;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       int i;
 
-       if (!phys)
-               return;
+       if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+               return -EINVAL;
+
+       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+               struct page *page;
+               char *src;
+
+               page = shmem_read_mapping_page(mapping, i);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               src = kmap_atomic(page);
+               memcpy(vaddr, src, PAGE_SIZE);
+               drm_clflush_virt_range(vaddr, PAGE_SIZE);
+               kunmap_atomic(src);
+
+               page_cache_release(page);
+               vaddr += PAGE_SIZE;
+       }
+
+       i915_gem_chipset_flush(obj->base.dev);
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (st == NULL)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       sg = st->sgl;
+       sg->offset = 0;
+       sg->length = obj->base.size;
+
+       sg_dma_address(sg) = obj->phys_handle->busaddr;
+       sg_dma_len(sg) = obj->base.size;
+
+       obj->pages = st;
+       obj->has_dma_mapping = true;
+       return 0;
+}
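
The tail of this function wraps the contiguous phys buffer in a single-entry sg_table so the rest of GEM can treat it like any other backing store. The same pattern, isolated (contig_to_sg is illustrative):

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative: wrap a physically contiguous buffer in a one-entry
 * sg_table, as the code above does for phys objects. */
static struct sg_table *contig_to_sg(dma_addr_t busaddr, size_t size)
{
        struct sg_table *st = kmalloc(sizeof(*st), GFP_KERNEL);

        if (!st || sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        st->sgl->offset = 0;
        st->sgl->length = size;
        sg_dma_address(st->sgl) = busaddr;
        sg_dma_len(st->sgl) = size;

        return st;
}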
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+       int ret;
+
+       BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, true);
+       if (ret) {
+               /* In the event of a disaster, abandon all caches and
+                * hope for the best.
+                */
+               WARN_ON(ret != -EIO);
+               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       if (obj->madv == I915_MADV_DONTNEED)
+               obj->dirty = 0;
 
-       if (obj->madv == I915_MADV_WILLNEED) {
+       if (obj->dirty) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-               char *vaddr = phys->vaddr;
+               char *vaddr = obj->phys_handle->vaddr;
                int i;
 
                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-                       struct page *page = shmem_read_mapping_page(mapping, i);
-                       if (!IS_ERR(page)) {
-                               char *dst = kmap_atomic(page);
-                               memcpy(dst, vaddr, PAGE_SIZE);
-                               drm_clflush_virt_range(dst, PAGE_SIZE);
-                               kunmap_atomic(dst);
-
-                               set_page_dirty(page);
+                       struct page *page;
+                       char *dst;
+
+                       page = shmem_read_mapping_page(mapping, i);
+                       if (IS_ERR(page))
+                               continue;
+
+                       dst = kmap_atomic(page);
+                       drm_clflush_virt_range(vaddr, PAGE_SIZE);
+                       memcpy(dst, vaddr, PAGE_SIZE);
+                       kunmap_atomic(dst);
+
+                       set_page_dirty(page);
+                       if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
-                               page_cache_release(page);
-                       }
+                       page_cache_release(page);
                        vaddr += PAGE_SIZE;
                }
-               i915_gem_chipset_flush(obj->base.dev);
+               obj->dirty = 0;
        }
 
-#ifdef CONFIG_X86
-       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-       drm_pci_free(obj->base.dev, phys);
-       obj->phys_handle = NULL;
+       sg_free_table(obj->pages);
+       kfree(obj->pages);
+
+       obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+       drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+       .get_pages = i915_gem_object_get_pages_phys,
+       .put_pages = i915_gem_object_put_pages_phys,
+       .release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma, *next;
+       int ret;
+
+       drm_gem_object_reference(&obj->base);
+       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+               if (i915_vma_unbind(vma))
+                       break;
+
+       ret = i915_gem_object_put_pages(obj);
+       drm_gem_object_unreference(&obj->base);
+
+       return ret;
 }
 
 int
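
The hunk above turns the legacy phys-object code into a regular backing-store backend: the object now carries an ops vtable (i915_gem_phys_ops) and plugs into the common get_pages/put_pages lifecycle, with drop_pages() unbinding and releasing whatever backend held the pages before; the following hunk then reduces i915_gem_object_attach_phys() to swapping obj->ops and reacquiring pages. A minimal standalone sketch of that vtable pattern, with illustrative types rather than the kernel's:

    struct gem_object;

    struct gem_object_ops {
            int  (*get_pages)(struct gem_object *obj); /* populate backing store */
            void (*put_pages)(struct gem_object *obj); /* write back, drop pages */
            void (*release)(struct gem_object *obj);   /* final teardown */
    };

    struct gem_object {
            const struct gem_object_ops *ops;
    };

    /* Mirrors the attach_phys() rework below: drop the old backend's pages,
     * swap the vtable, then reacquire pages through the new backend. */
    static int attach_phys(struct gem_object *obj,
                           const struct gem_object_ops *phys_ops)
    {
            obj->ops = phys_ops;
            return obj->ops->get_pages(obj);
    }
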
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
 {
        drm_dma_handle_t *phys;
-       struct address_space *mapping;
-       char *vaddr;
-       int i;
+       int ret;
 
        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (obj->base.filp == NULL)
                return -EINVAL;
 
+       ret = drop_pages(obj);
+       if (ret)
+               return ret;
+
        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;
 
-       vaddr = phys->vaddr;
-#ifdef CONFIG_X86
-       set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
-       mapping = file_inode(obj->base.filp)->i_mapping;
-       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-               struct page *page;
-               char *src;
-
-               page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page)) {
-#ifdef CONFIG_X86
-                       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-                       drm_pci_free(obj->base.dev, phys);
-                       return PTR_ERR(page);
-               }
-
-               src = kmap_atomic(page);
-               memcpy(vaddr, src, PAGE_SIZE);
-               kunmap_atomic(src);
-
-               mark_page_accessed(page);
-               page_cache_release(page);
-
-               vaddr += PAGE_SIZE;
-       }
-
        obj->phys_handle = phys;
-       return 0;
+       obj->ops = &i915_gem_phys_ops;
+
+       return i915_gem_object_get_pages(obj);
 }
 
 static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);
+       int ret;
+
+       /* We manually control the domain here and pretend that it
+        * remains coherent, i.e. in the GTT domain, like shmem_pwrite.
+        */
+       ret = i915_gem_object_wait_rendering(obj, false);
+       if (ret)
+               return ret;
 
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                        return -EFAULT;
        }
 
+       drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(dev);
        return 0;
 }
@@ -346,6 +401,7 @@ static int
 i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
+               bool dumb,
                uint32_t *handle_p)
 {
        struct drm_i915_gem_object *obj;
@@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file,
        if (obj == NULL)
                return -ENOMEM;
 
+       obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file,
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
-                              args->size, &args->handle);
+                              args->size, true, &args->handle);
 }
 
 /**
@@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_create *args = data;
 
        return i915_gem_create(file, dev,
-                              args->size, &args->handle);
+                              args->size, false, &args->handle);
 }
 
 static inline int
@@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->phys_handle) {
-               ret = i915_gem_phys_pwrite(obj, args, file);
-               goto out;
-       }
-
        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
@@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                 * textures). Fall back to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT || ret == -ENOSPC)
-               ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+       if (ret == -EFAULT || ret == -ENOSPC) {
+               if (obj->phys_handle)
+                       ret = i915_gem_phys_pwrite(obj, args, file);
+               else
+                       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+       }
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: the sequence number we are waiting for
  * @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * Returns 0 if the seqno was found within the allotted time. Else returns the
  * errno with the remaining time filled in the timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
@@ -1262,6 +1318,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible = dev_priv->mm.interruptible;
+       unsigned reset_counter;
        int ret;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1332,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
        if (ret)
                return ret;
 
-       return __wait_seqno(ring, seqno,
-                           atomic_read(&dev_priv->gpu_error.reset_counter),
-                           interruptible, NULL, NULL);
+       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+                                NULL, NULL);
 }
 
 static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-                                    struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 {
        if (!obj->active)
                return 0;
@@ -1319,7 +1375,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1410,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+                               file_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /**
@@ -1466,6 +1523,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver-private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
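
The recommended pattern from the new IMPORTANT note, seen from the userspace side, is a two-step dance: ask the driver for a fake mmap offset via an ioctl, then mmap(2) the DRM fd itself at that offset. A hedged sketch (error handling trimmed; assumes an open DRM fd and a valid GEM handle; map_through_gtt is an illustrative helper name):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void *map_through_gtt(int fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

            /* Step 1: fetch the fake offset backing this object. */
            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return MAP_FAILED;

            /* Step 2: a plain mmap(2) on the DRM fd -- this is the call
             * debug tooling like valgrind can see and track. */
            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, arg.offset);
    }
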
@@ -1762,10 +1829,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
        drm_gem_free_mmap_offset(&obj->base);
 }
 
-int
+static int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
-                 uint32_t handle,
+                 uint32_t handle, bool dumb,
                  uint64_t *offset)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1849,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
                goto unlock;
        }
 
+       /*
+        * We don't allow dumb mmaps on objects created using another
+        * interface.
+        */
+       WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+                 "Illegal dumb map of accelerated buffer.\n");
+
        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
@@ -1806,6 +1880,15 @@ unlock:
        return ret;
 }
 
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+                        struct drm_device *dev,
+                        uint32_t handle,
+                        uint64_t *offset)
+{
+       return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
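
The generic dumb-buffer ioctls are what land in the new i915_gem_dumb_map_offset() above. Roughly, from userspace (illustrative sketch, error handling omitted; create_and_map_dumb is a hypothetical helper):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int create_and_map_dumb(int fd, uint32_t w, uint32_t h,
                                   uint32_t *handle, uint64_t *offset)
    {
            struct drm_mode_create_dumb create = {
                    .width = w, .height = h, .bpp = 32,
            };
            struct drm_mode_map_dumb map = { 0 };

            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return -1;

            map.handle = *handle = create.handle;
            /* This request is what ends up in i915_gem_dumb_map_offset(). */
            if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return -1;

            *offset = map.offset;
            return 0;
    }

The WARN_ONCE added to i915_gem_mmap_gtt() enforces the converse: a dumb map request on a handle that was neither created through this path nor imported is flagged as illegal.
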
@@ -1827,7 +1910,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_mmap_gtt *args = data;
 
-       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+       return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
 }
 
 static inline int
@@ -1945,7 +2028,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
                long target, unsigned flags)
 {
-       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+       const struct {
+               struct list_head *list;
+               unsigned int bit;
+       } phases[] = {
+               { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+               { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+               { NULL, 0 },
+       }, *phase;
        unsigned long count = 0;
 
        /*
@@ -1967,48 +2057,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
-       if (flags & I915_SHRINK_UNBOUND) {
+       for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
 
-               INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-                       struct drm_i915_gem_object *obj;
-
-                       obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                              typeof(*obj), global_list);
-                       list_move_tail(&obj->global_list, &still_in_list);
-
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                               continue;
-
-                       drm_gem_object_reference(&obj->base);
-
-                       if (i915_gem_object_put_pages(obj) == 0)
-                               count += obj->base.size >> PAGE_SHIFT;
-
-                       drm_gem_object_unreference(&obj->base);
-               }
-               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-       }
-
-       if (flags & I915_SHRINK_BOUND) {
-               struct list_head still_in_list;
+               if ((flags & phase->bit) == 0)
+                       continue;
 
                INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+               while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;
 
-                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                       obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);
 
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       if (flags & I915_SHRINK_PURGEABLE &&
+                           !i915_gem_object_is_purgeable(obj))
                                continue;
 
                        drm_gem_object_reference(&obj->base);
 
-                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       /* For the unbound phase, this should be a no-op! */
+                       list_for_each_entry_safe(vma, v,
+                                                &obj->vma_list, vma_link)
                                if (i915_vma_unbind(vma))
                                        break;
 
@@ -2017,7 +2089,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        drm_gem_object_unreference(&obj->base);
                }
-               list_splice(&still_in_list, &dev_priv->mm.bound_list);
+               list_splice(&still_in_list, phase->list);
        }
 
        return count;
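
The shrinker rewrite above folds two near-identical loops (unbound, then bound) into a single loop over a small phase table, each phase gated by its flag bit; only the bound phase has VMAs to unbind, hence the "should be a no-op" comment. The idiom in isolation, with the kernel lists stubbed out as page counters:

    #include <stddef.h>

    #define SHRINK_UNBOUND (1u << 0)
    #define SHRINK_BOUND   (1u << 1)

    static unsigned long shrink(unsigned int flags,
                                unsigned long *unbound, unsigned long *bound)
    {
            const struct {
                    unsigned long *list;   /* stand-in for the object lists */
                    unsigned int bit;
            } phases[] = {
                    { unbound, SHRINK_UNBOUND },
                    { bound,   SHRINK_BOUND },
                    { NULL, 0 },
            }, *phase;
            unsigned long count = 0;

            for (phase = phases; phase->list; phase++) {
                    if ((flags & phase->bit) == 0)
                            continue;      /* caller didn't request this phase */
                    count += *phase->list; /* real code walks and unbinds here */
                    *phase->list = 0;
            }
            return count;
    }
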
@@ -2122,6 +2194,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
 
+       if (obj->tiling_mode != I915_TILING_NONE &&
+           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               i915_gem_object_pin_pages(obj);
+
        return 0;
 
 err_pages:
@@ -2420,15 +2496,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
        ring->outstanding_lazy_seqno = 0;
        ring->preallocated_lazy_request = NULL;
 
-       if (!dev_priv->ums.mm_suspended) {
-               i915_queue_hangcheck(ring->dev);
+       i915_queue_hangcheck(ring->dev);
 
-               cancel_delayed_work_sync(&dev_priv->mm.idle_work);
-               queue_delayed_work(dev_priv->wq,
-                                  &dev_priv->mm.retire_work,
-                                  round_jiffies_up_relative(HZ));
-               intel_mark_busy(dev_priv->dev);
-       }
+       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->mm.retire_work,
+                          round_jiffies_up_relative(HZ));
+       intel_mark_busy(dev_priv->dev);
 
        if (out_seqno)
                *out_seqno = request->seqno;
@@ -2495,12 +2569,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
 
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
+       struct intel_context *ctx = request->ctx;
+
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);
 
-       if (request->ctx)
-               i915_gem_context_unreference(request->ctx);
+       if (ctx) {
+               if (i915.enable_execlists) {
+                       struct intel_engine_cs *ring = request->ring;
 
+                       if (ctx != ring->default_context)
+                               intel_lr_context_unpin(ring, ctx);
+               }
+               i915_gem_context_unreference(ctx);
+       }
        kfree(request);
 }
 
@@ -2554,6 +2636,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_object_move_to_inactive(obj);
        }
 
+       /*
+        * Clear the execlists queue up before freeing the requests, as those
+        * are the ones that keep the context and ringbuffer backing objects
+        * pinned in place.
+        */
+       while (!list_empty(&ring->execlist_queue)) {
+               struct intel_ctx_submit_request *submit_req;
+
+               submit_req = list_first_entry(&ring->execlist_queue,
+                               struct intel_ctx_submit_request,
+                               execlist_link);
+               list_del(&submit_req->execlist_link);
+               intel_runtime_pm_put(dev_priv);
+               i915_gem_context_unreference(submit_req->ctx);
+               kfree(submit_req);
+       }
+
        /*
         * We must free the requests after all the corresponding objects have
         * been moved off active lists. Which is the same order as the normal
@@ -2571,18 +2670,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_free_request(request);
        }
 
-       while (!list_empty(&ring->execlist_queue)) {
-               struct intel_ctx_submit_request *submit_req;
-
-               submit_req = list_first_entry(&ring->execlist_queue,
-                               struct intel_ctx_submit_request,
-                               execlist_link);
-               list_del(&submit_req->execlist_link);
-               intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(submit_req->ctx);
-               kfree(submit_req);
-       }
-
        /* These may not have been flushed before the reset; do so now */
        kfree(ring->preallocated_lazy_request);
        ring->preallocated_lazy_request = NULL;
@@ -2719,6 +2806,15 @@ i915_gem_retire_requests(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
                idle &= list_empty(&ring->request_list);
+               if (i915.enable_execlists) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&ring->execlist_lock, flags);
+                       idle &= list_empty(&ring->execlist_queue);
+                       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+                       intel_execlists_retire_requests(ring);
+               }
        }
 
        if (idle)
@@ -2811,6 +2907,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u32 seqno = 0;
        int ret = 0;
 
+       if (args->flags != 0)
+               return -EINVAL;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -2846,8 +2945,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
 
-       return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
-                           file->driver_priv);
+       return __i915_wait_seqno(ring, seqno, reset_counter, true,
+                                &args->timeout_ns, file->driver_priv);
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3265,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
             obj->stride, obj->tiling_mode);
 
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -3384,46 +3484,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
        return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       int err = 0;
-
-       list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-               if (obj->gtt_space == NULL) {
-                       printk(KERN_ERR "object found on GTT list with no space reserved\n");
-                       err++;
-                       continue;
-               }
-
-               if (obj->cache_level != obj->gtt_space->color) {
-                       printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level,
-                              obj->gtt_space->color);
-                       err++;
-                       continue;
-               }
-
-               if (!i915_gem_valid_gtt_space(dev,
-                                             obj->gtt_space,
-                                             obj->cache_level)) {
-                       printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level);
-                       err++;
-                       continue;
-               }
-       }
-
-       WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3514,25 +3574,10 @@ search_free:
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-       if (i915_is_ggtt(vm)) {
-               bool mappable, fenceable;
-
-               fenceable = (vma->node.size == fence_size &&
-                            (vma->node.start & (fence_alignment - 1)) == 0);
-
-               mappable = (vma->node.start + obj->base.size <=
-                           dev_priv->gtt.mappable_end);
-
-               obj->map_and_fenceable = mappable && fenceable;
-       }
-
-       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
        trace_i915_vma_bind(vma, flags);
        vma->bind_vma(vma, obj->cache_level,
-                     flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+                     flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
-       i915_gem_verify_gtt(dev);
        return vma;
 
 err_remove_node:
@@ -3560,7 +3605,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         */
-       if (obj->stolen)
+       if (obj->stolen || obj->phys_handle)
                return false;
 
        /* If the GPU is snooping the contents of the CPU cache,
@@ -3739,7 +3784,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (drm_mm_node_allocated(&vma->node))
                                vma->bind_vma(vma, cache_level,
-                                             obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+                                               vma->bound & GLOBAL_BIND);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3814,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       i915_gem_verify_gtt(dev);
        return 0;
 }
 
@@ -4067,7 +4111,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (seqno == 0)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4101,6 +4145,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
+       unsigned bound;
        int ret;
 
        if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4154,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
                return -EINVAL;
 
+       if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+               return -EINVAL;
+
        vma = i915_gem_obj_to_vma(obj, vm);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4178,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                }
        }
 
+       bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
                vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
 
-       if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
+       if ((bound ^ vma->bound) & GLOBAL_BIND) {
+               bool mappable, fenceable;
+               u32 fence_size, fence_alignment;
+
+               fence_size = i915_gem_get_gtt_size(obj->base.dev,
+                                                  obj->base.size,
+                                                  obj->tiling_mode);
+               fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+                                                            obj->base.size,
+                                                            obj->tiling_mode,
+                                                            true);
+
+               fenceable = (vma->node.size == fence_size &&
+                            (vma->node.start & (fence_alignment - 1)) == 0);
+
+               mappable = (vma->node.start + obj->base.size <=
+                           dev_priv->gtt.mappable_end);
+
+               obj->map_and_fenceable = mappable && fenceable;
+       }
+
+       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
        vma->pin_count++;
        if (flags & PIN_MAPPABLE)
                obj->pin_mappable |= true;
@@ -4193,7 +4265,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int ret;
 
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4321,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int ret;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -ENODEV;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -4326,6 +4401,7 @@ int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -4353,6 +4429,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       if (obj->pages &&
+           obj->tiling_mode != I915_TILING_NONE &&
+           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+               if (obj->madv == I915_MADV_WILLNEED)
+                       i915_gem_object_unpin_pages(obj);
+               if (args->madv == I915_MADV_WILLNEED)
+                       i915_gem_object_pin_pages(obj);
+       }
+
        if (obj->madv != __I915_MADV_PURGED)
                obj->madv = args->madv;
 
@@ -4495,8 +4580,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                }
        }
 
-       i915_gem_object_detach_phys(obj);
-
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)
@@ -4504,6 +4587,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        WARN_ON(obj->frontbuffer_bits);
 
+       if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+           obj->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_unpin_pages(obj);
+
        if (WARN_ON(obj->pages_pin_count))
                obj->pages_pin_count = 0;
        if (discard_backing_storage(obj))
@@ -4576,9 +4664,6 @@ i915_gem_suspend(struct drm_device *dev)
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
-       if (dev_priv->ums.mm_suspended)
-               goto err;
-
        ret = i915_gpu_idle(dev);
        if (ret)
                goto err;
@@ -4589,15 +4674,7 @@ i915_gem_suspend(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       i915_kernel_lost_context(dev);
        i915_gem_stop_ringbuffers(dev);
-
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound ums.mm_suspended!
-        */
-       dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
-                                                            DRIVER_MODESET);
        mutex_unlock(&dev->struct_mutex);
 
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4888,9 +4965,6 @@ int i915_gem_init(struct drm_device *dev)
        }
        mutex_unlock(&dev->struct_mutex);
 
-       /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               dev_priv->dri1.allow_batchbuffer = 1;
        return ret;
 }
 
@@ -4905,74 +4979,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
                dev_priv->gt.cleanup_ring(ring);
 }
 
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return 0;
-
-       if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-               DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               atomic_set(&dev_priv->gpu_error.reset_counter, 0);
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       dev_priv->ums.mm_suspended = 0;
-
-       ret = i915_gem_init_hw(dev);
-       if (ret != 0) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-
-       BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
-       ret = drm_irq_install(dev, dev->pdev->irq);
-       if (ret)
-               goto cleanup_ringbuffer;
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-
-cleanup_ringbuffer:
-       i915_gem_cleanup_ringbuffer(dev);
-       dev_priv->ums.mm_suspended = 1;
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return 0;
-
-       mutex_lock(&dev->struct_mutex);
-       drm_irq_uninstall(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       ret = i915_gem_suspend(dev);
-       if (ret)
-               DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-
 static void
 init_ring_lists(struct intel_engine_cs *ring)
 {
@@ -5119,6 +5125,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
@@ -5302,7 +5317,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long timeout = msecs_to_jiffies(5000) + 1;
-       unsigned long pinned, bound, unbound, freed;
+       unsigned long pinned, bound, unbound, freed_pages;
        bool was_interruptible;
        bool unlock;
 
@@ -5319,7 +5334,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       freed = i915_gem_shrink_all(dev_priv);
+       freed_pages = i915_gem_shrink_all(dev_priv);
 
        dev_priv->mm.interruptible = was_interruptible;
 
@@ -5350,14 +5365,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
-       pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-               freed, pinned);
+       if (freed_pages || unbound || bound)
+               pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+                       freed_pages << PAGE_SHIFT, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);
 
-       *(unsigned long *)ptr += freed;
+       *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
 }
 
index a5221d8f1580182fe80c7c9cfc34da7d3484fe11..d17ff435f2767fa31c51ed08ef5e3130aa4dddbd 100644 (file)
@@ -88,6 +88,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
        struct intel_context *ctx = container_of(ctx_ref,
                                                 typeof(*ctx), ref);
 
+       trace_i915_context_free(ctx);
+
        if (i915.enable_execlists)
                intel_lr_context_free(ctx);
 
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
                ctx->ppgtt = ppgtt;
        }
 
+       trace_i915_context_create(ctx);
+
        return ctx;
 
 err_unpin:
@@ -522,6 +527,7 @@ static int do_switch(struct intel_engine_cs *ring,
        struct intel_context *from = ring->last_context;
        u32 hw_flags = 0;
        bool uninitialized = false;
+       struct i915_vma *vma;
        int ret, i;
 
        if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -548,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
        from = ring->last_context;
 
        if (to->ppgtt) {
+               trace_switch_mm(ring, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, ring);
                if (ret)
                        goto unpin_out;
@@ -571,11 +578,10 @@ static int do_switch(struct intel_engine_cs *ring,
        if (ret)
                goto unpin_out;
 
-       if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
-                                                          &dev_priv->gtt.base);
-               vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
-       }
+       vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+       if (!(vma->bound & GLOBAL_BIND))
+               vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+                               GLOBAL_BIND);
 
        if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                hw_flags |= MI_RESTORE_INHIBIT;
@@ -629,7 +635,7 @@ done:
 
        if (uninitialized) {
                if (ring->init_context) {
-                       ret = ring->init_context(ring);
+                       ret = ring->init_context(ring, to);
                        if (ret)
                                DRM_ERROR("ring init context: %d\n", ret);
                }
index 1a0611bb576b3eb9b78ede149955f36b798360df..f06027ba3ee5512a7718deb22112f9fe6e16458b 100644 (file)
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        goto err;
                }
 
+               WARN_ONCE(obj->base.dumb,
+                         "GPU use of dumb buffer is illegal.\n");
+
                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
@@ -357,12 +360,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-           !target_i915_obj->has_global_gtt_mapping)) {
-               struct i915_vma *vma =
-                       list_first_entry(&target_i915_obj->vma_list,
-                                        typeof(*vma), vma_link);
-               vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
-       }
+           !(target_vma->bound & GLOBAL_BIND)))
+               target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+                               GLOBAL_BIND);
 
        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -531,7 +531,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
        flags = 0;
        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
-               flags |= PIN_MAPPABLE;
+               flags |= PIN_GLOBAL | PIN_MAPPABLE;
        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
        return 0;
 }
 
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+             struct drm_clip_rect *box,
+             int DR1, int DR4)
+{
+       int ret;
+
+       if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+           box->y2 <= 0 || box->x2 <= 0) {
+               DRM_ERROR("Bad box %d,%d..%d,%d\n",
+                         box->x1, box->y1, box->x2, box->y2);
+               return -EINVAL;
+       }
+
+       if (INTEL_INFO(ring->dev)->gen >= 4) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+               intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+               intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+               intel_ring_emit(ring, DR4);
+       } else {
+               ret = intel_ring_begin(ring, 6);
+               if (ret)
+                       return ret;
+
+               intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+               intel_ring_emit(ring, DR1);
+               intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+               intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+               intel_ring_emit(ring, DR4);
+               intel_ring_emit(ring, 0);
+       }
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                               struct intel_engine_cs *ring,
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
-                       ret = i915_emit_box(dev, &cliprects[i],
+                       ret = i915_emit_box(ring, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto pre_mutex_err;
 
-       if (dev_priv->ums.mm_suspended) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EBUSY;
-               goto pre_mutex_err;
-       }
-
        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1403,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                      batch_obj,
                                      args->batch_start_offset,
                                      file->is_master);
-               if (ret)
-                       goto err;
-
-               /*
-                * XXX: Actually do this when enabling batch copy...
-                *
-                * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-                * from MI_BATCH_BUFFER_START commands issued in the
-                * dispatch_execbuffer implementations. We specifically don't
-                * want that set when the command parser is enabled.
-                */
+               if (ret) {
+                       if (ret != -EACCES)
+                               goto err;
+               } else {
+                       /*
+                        * XXX: Actually do this when enabling batch copy...
+                        *
+                        * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+                        * from MI_BATCH_BUFFER_START commands issued in the
+                        * dispatch_execbuffer implementations. We specifically don't
+                        * want that set when the command parser is enabled.
+                        */
+               }
        }
 
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
index b672b843fd5e5831323094824116cffe912d8a55..171f6eafdeee3093d21354b411debebec994378b 100644 (file)
@@ -35,13 +35,26 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
-       if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+       bool has_aliasing_ppgtt;
+       bool has_full_ppgtt;
+
+       has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+       has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+       if (IS_GEN8(dev))
+               has_full_ppgtt = false; /* XXX why? */
+
+       /*
+        * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+        * execlists, the sole mechanism available to submit work.
+        */
+       if (INTEL_INFO(dev)->gen < 9 &&
+           (enable_ppgtt == 0 || !has_aliasing_ppgtt))
                return 0;
 
        if (enable_ppgtt == 1)
                return 1;
 
-       if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+       if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +72,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
                return 0;
        }
 
-       return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+       return has_aliasing_ppgtt ? 1 : 0;
 }
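
As a usage note: the value being sanitized here comes from the i915.enable_ppgtt module parameter, so booting with i915.enable_ppgtt=2 requests full PPGTT (honoured only where has_full_ppgtt holds), i915.enable_ppgtt=1 restricts the driver to aliasing PPGTT, and i915.enable_ppgtt=0 disables PPGTT altogether, except on gen9+, where the function keeps it enabled because execlists cannot submit work without it.
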
 
 
@@ -156,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
-       /* Mark the page as writeable.  Other platforms don't have a
-        * setting for read-only/writable, so this matches that behavior.
-        */
        if (!(flags & PTE_READ_ONLY))
                pte |= BYT_PTE_WRITEABLE;
 
@@ -1092,7 +1102,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
        if (INTEL_INFO(dev)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
-       else if (IS_GEN8(dev))
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
                return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
        else
                BUG();
@@ -1166,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 
        ppgtt->file_priv = fpriv;
 
+       trace_i915_ppgtt_create(&ppgtt->base);
+
        return ppgtt;
 }
 
@@ -1174,6 +1186,8 @@ void  i915_ppgtt_release(struct kref *kref)
        struct i915_hw_ppgtt *ppgtt =
                container_of(kref, struct i915_hw_ppgtt, ref);
 
+       trace_i915_ppgtt_release(&ppgtt->base);
+
        /* vmas should already be unbound */
        WARN_ON(!list_empty(&ppgtt->base.active_list));
        WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1258,7 +1272,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
                fault_reg = I915_READ(RING_FAULT_REG(ring));
                if (fault_reg & RING_FAULT_VALID) {
                        DRM_DEBUG_DRIVER("Unexpected fault\n"
-                                        "\tAddr: 0x%08lx\\n"
+                                        "\tAddr: 0x%08lx\n"
                                         "\tAddress space: %s\n"
                                         "\tSource ID: %d\n"
                                         "\tType: %d\n",
@@ -1328,7 +1342,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                 * Unfortunately above, we've just wiped out the mappings
                 * without telling our object about it. So we need to fake it.
                 */
-               obj->has_global_gtt_mapping = 0;
+               vma->bound &= ~GLOBAL_BIND;
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
        }
 
@@ -1525,7 +1539,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 
        BUG_ON(!i915_is_ggtt(vma->vm));
        intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
-       vma->obj->has_global_gtt_mapping = 1;
+       vma->bound = GLOBAL_BIND;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1544,7 +1558,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
        const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
        BUG_ON(!i915_is_ggtt(vma->vm));
-       vma->obj->has_global_gtt_mapping = 0;
+       vma->bound = 0;
        intel_gtt_clear_range(first, size);
 }
 
@@ -1572,24 +1586,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
         * flags. At all other times, the GPU will use the aliasing PPGTT.
         */
        if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
-               if (!obj->has_global_gtt_mapping ||
+               if (!(vma->bound & GLOBAL_BIND) ||
                    (cache_level != obj->cache_level)) {
                        vma->vm->insert_entries(vma->vm, obj->pages,
                                                vma->node.start,
                                                cache_level, flags);
-                       obj->has_global_gtt_mapping = 1;
+                       vma->bound |= GLOBAL_BIND;
                }
        }
 
        if (dev_priv->mm.aliasing_ppgtt &&
-           (!obj->has_aliasing_ppgtt_mapping ||
+           (!(vma->bound & LOCAL_BIND) ||
             (cache_level != obj->cache_level))) {
                struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
                appgtt->base.insert_entries(&appgtt->base,
                                            vma->obj->pages,
                                            vma->node.start,
                                            cache_level, flags);
-               vma->obj->has_aliasing_ppgtt_mapping = 1;
+               vma->bound |= LOCAL_BIND;
        }
 }
 
@@ -1599,21 +1613,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = vma->obj;
 
-       if (obj->has_global_gtt_mapping) {
+       if (vma->bound & GLOBAL_BIND) {
                vma->vm->clear_range(vma->vm,
                                     vma->node.start,
                                     obj->base.size,
                                     true);
-               obj->has_global_gtt_mapping = 0;
+               vma->bound &= ~GLOBAL_BIND;
        }
 
-       if (obj->has_aliasing_ppgtt_mapping) {
+       if (vma->bound & LOCAL_BIND) {
                struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
                appgtt->base.clear_range(&appgtt->base,
                                         vma->node.start,
                                         obj->base.size,
                                         true);
-               obj->has_aliasing_ppgtt_mapping = 0;
+               vma->bound &= ~LOCAL_BIND;
        }
 }
 
@@ -1650,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
        }
 }
 
-int i915_gem_setup_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end)
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
+                                    unsigned long start,
+                                    unsigned long mappable_end,
+                                    unsigned long end)
 {
        /* Let GEM manage all of the aperture.
         *
@@ -1691,7 +1705,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
                        DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
                        return ret;
                }
-               obj->has_global_gtt_mapping = 1;
+               vma->bound |= GLOBAL_BIND;
        }
 
        dev_priv->gtt.base.start = start;
@@ -1764,7 +1778,6 @@ static int setup_scratch_page(struct drm_device *dev)
        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
-       get_page(page);
        set_pages_uc(page, 1);
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1802,6 @@ static void teardown_scratch_page(struct drm_device *dev)
        set_pages_wb(page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(page);
        __free_page(page);
 }
 
@@ -1859,6 +1871,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
                return (gmch_ctrl - 0x17 + 9) << 22;
 }
 
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+       gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+       gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+       if (gen9_gmch_ctl < 0xf0)
+               return gen9_gmch_ctl << 25; /* 32 MB units */
+       else
+               /* 4 MB increments: 0xf0 decodes to 4 MB, 0xf1 to 8 MB, ... */
+               return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
 static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
 {
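
A quick sanity check of the gen9 decode above, as standalone arithmetic (this assumes the control word has already been shifted and masked, as the function does before the comparison):

    #include <assert.h>
    #include <stddef.h>

    static size_t gen9_stolen_bytes(unsigned int ctl)
    {
            if (ctl < 0xf0)
                    return (size_t)ctl << 25;              /* 32 MB units */
            return (size_t)(ctl - 0xf0 + 1) << 22;         /* 4 MB steps  */
    }

    int main(void)
    {
            assert(gen9_stolen_bytes(0x02) == 64u << 20);  /* 2 x 32 MB  */
            assert(gen9_stolen_bytes(0xf0) ==  4u << 20);  /* first step */
            assert(gen9_stolen_bytes(0xf1) ==  8u << 20);
            return 0;
    }
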
@@ -1902,6 +1926,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
+       if (!USES_PPGTT(dev_priv->dev))
+               /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+                * so RTL will always use the value corresponding to
+                * pat_sel = 000".
+                * So let's disable cache for GGTT to avoid screen corruption.
+                * MOCS still can be used though.
+                * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+                * before this patch, i.e. the same uncached + snooping access
+                * like on gen6/7 seems to be in effect.
+                * - So this just fixes blitter/render access. Again it looks
+                * like it's not just uncached access, but uncached + snooping.
+                * So we can still hold onto all our assumptions wrt cpu
+                * clflushing on LLC machines.
+                */
+               pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
        /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
         * write would work. */
        I915_WRITE(GEN8_PRIVATE_PAT, pat);
@@ -1918,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
         * Only the snoop bit has meaning for CHV, the rest is
         * ignored.
         *
-        * Note that the harware enforces snooping for all page
-        * table accesses. The snoop bit is actually ignored for
-        * PDEs.
+        * The hardware will never snoop for certain types of accesses:
+        * - CPU GTT (GMADR->GGTT->no snoop->memory)
+        * - PPGTT page tables
+        * - some other special cycles
+        *
+        * As with BDW, we also need to consider the following for GT accesses:
+        * "For GGTT, there is NO pat_sel[2:0] from the entry,
+        * so RTL will always use the value corresponding to
+        * pat_sel = 000".
+        * Which means we must set the snoop bit in PAT entry 0
+        * in order to keep the global status page working.
         */
        pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
              GEN8_PPAT(1, 0) |
@@ -1955,7 +2003,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+               gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+       } else if (IS_CHERRYVIEW(dev)) {
                *stolen = chv_get_stolen_size(snb_gmch_ctl);
                gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
        } else {
@@ -2127,6 +2178,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
        vma->obj = obj;
 
        switch (INTEL_INFO(vm->dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
index d5c14af51e995d5c19a6d37b950dc2691a44ffb2..beaf4bcfdac8d83d27c466432f7f92c51b8bdf5e 100644 (file)
@@ -123,6 +123,12 @@ struct i915_vma {
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
 
+       /** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND    (1<<0)
+#define LOCAL_BIND     (1<<1)
+#define PTE_READ_ONLY  (1<<2)
+       unsigned int bound : 4;
+
        /** This object's place on the active/inactive lists */
        struct list_head mm_list;
 
@@ -155,8 +161,6 @@ struct i915_vma {
         * setting the valid PTE entries to a reserved scratch page. */
        void (*unbind_vma)(struct i915_vma *vma);
        /* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-#define PTE_READ_ONLY (1<<1)
        void (*bind_vma)(struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags);
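With the bind flags now tracked per-VMA, code that previously consulted obj->has_global_gtt_mapping tests vma->bound instead (see the stolen-object hunk below). A minimal sketch of the idiom, with hypothetical helper names:

    /* Sketch: querying and recording per-VMA bind state. */
    static inline bool vma_is_ggtt_bound(const struct i915_vma *vma)
    {
            return vma->bound & GLOBAL_BIND;
    }

    static inline void vma_mark_bound(struct i915_vma *vma, unsigned int flags)
    {
            /* Only the three defined flag bits fit the 4-bit field. */
            vma->bound |= flags & (GLOBAL_BIND | LOCAL_BIND | PTE_READ_ONLY);
    }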
@@ -270,8 +274,6 @@ struct i915_hw_ppgtt {
 
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
-int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-                             unsigned long mappable_end, unsigned long end);
 void i915_global_gtt_cleanup(struct drm_device *dev);
 
 
index a9a62d75aa577daa015543c7955c25cd5bf5b42a..98dcd94acba8f25a360f0e03f0c67b874fe3a72a 100644 (file)
@@ -38,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
                return &gen7_null_state;
        case 8:
                return &gen8_null_state;
+       case 9:
+               return &gen9_null_state;
        }
 
        return NULL;
index 85fda6b803e4d33b8fbb405127f1a3d702478a8e..c3889189254774cd1de84a26c0153d1146adfa9f 100644 (file)
@@ -533,7 +533,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                }
        }
 
-       obj->has_global_gtt_mapping = 1;
+       vma->bound |= GLOBAL_BIND;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);
index 2cefb597df6dc92d446557442073d5c998843a30..4727a4e2c87c98669c03c07a526723b8dcec49a9 100644 (file)
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (INTEL_INFO(dev)->gen >= 6) {
-               uint32_t dimm_c0, dimm_c1;
-               dimm_c0 = I915_READ(MAD_DIMM_C0);
-               dimm_c1 = I915_READ(MAD_DIMM_C1);
-               dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-               dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-               /* Enable swizzling when the channels are populated with
-                * identically sized dimms. We don't need to check the 3rd
-                * channel because no cpu with gpu attached ships in that
-                * configuration. Also, swizzling only makes sense for 2
-                * channels anyway. */
-               if (dimm_c0 == dimm_c1) {
-                       swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-                       swizzle_y = I915_BIT_6_SWIZZLE_9;
+               if (dev_priv->preserve_bios_swizzle) {
+                       if (I915_READ(DISP_ARB_CTL) &
+                           DISP_TILE_SURFACE_SWIZZLING) {
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else {
+                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       }
                } else {
-                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       uint32_t dimm_c0, dimm_c1;
+                       dimm_c0 = I915_READ(MAD_DIMM_C0);
+                       dimm_c1 = I915_READ(MAD_DIMM_C1);
+                       dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+                       dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+                       /* Enable swizzling when the channels are populated
+                        * with identically sized dimms. We don't need to check
+                        * the 3rd channel because no cpu with gpu attached
+                        * ships in that configuration. Also, swizzling only
+                        * makes sense for 2 channels anyway. */
+                       if (dimm_c0 == dimm_c1) {
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else {
+                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       }
                }
        } else if (IS_GEN5(dev)) {
                /* On Ironlake whatever DRAM config, GPU always do
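When dev_priv->preserve_bios_swizzle is set, the driver now mirrors whatever the BIOS programmed into DISP_ARB_CTL instead of re-deriving the answer from DIMM sizes. Condensed, the gen6+ decision is (a sketch, not the driver code):

    bool swizzled;

    if (dev_priv->preserve_bios_swizzle)
            /* Trust the BIOS: display swizzling on implies memory swizzling. */
            swizzled = I915_READ(DISP_ARB_CTL) & DISP_TILE_SURFACE_SWIZZLING;
    else
            /* Identically sized DIMMs on both channels => interleave => swizzle. */
            swizzled = (dimm_c0 == dimm_c1);

    swizzle_x = swizzled ? I915_BIT_6_SWIZZLE_9_10 : I915_BIT_6_SWIZZLE_NONE;
    swizzle_y = swizzled ? I915_BIT_6_SWIZZLE_9    : I915_BIT_6_SWIZZLE_NONE;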
@@ -167,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                        }
                        break;
                }
+
+               /* check for L-shaped memory aka modified enhanced addressing */
+               if (IS_GEN4(dev)) {
+                       uint32_t ddc2 = I915_READ(DCC2);
+
+                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+                               dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+               }
+
                if (dcc == 0xffffffff) {
                        DRM_ERROR("Couldn't read from MCHBAR.  "
                                  "Disabling tiling.\n");
@@ -364,24 +384,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 * has to also include the unfenced register the GPU uses
                 * whilst executing a fenced command for an untiled object.
                 */
-
-               obj->map_and_fenceable =
-                       !i915_gem_obj_ggtt_bound(obj) ||
-                       (i915_gem_obj_ggtt_offset(obj) +
-                        obj->base.size <= dev_priv->gtt.mappable_end &&
-                        i915_gem_object_fence_ok(obj, args->tiling_mode));
-
-               /* Rebind if we need a change of alignment */
-               if (!obj->map_and_fenceable) {
-                       u32 unfenced_align =
-                               i915_gem_get_gtt_alignment(dev, obj->base.size,
-                                                           args->tiling_mode,
-                                                           false);
-                       if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
-                               ret = i915_gem_object_ggtt_unbind(obj);
-               }
+               if (obj->map_and_fenceable &&
+                   !i915_gem_object_fence_ok(obj, args->tiling_mode))
+                       ret = i915_gem_object_ggtt_unbind(obj);
 
                if (ret == 0) {
+                       if (obj->pages &&
+                           obj->madv == I915_MADV_WILLNEED &&
+                           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+                               if (args->tiling_mode == I915_TILING_NONE)
+                                       i915_gem_object_unpin_pages(obj);
+                               if (obj->tiling_mode == I915_TILING_NONE)
+                                       i915_gem_object_pin_pages(obj);
+                       }
+
                        obj->fence_dirty =
                                obj->last_fenced_seqno ||
                                obj->fence_reg != I915_FENCE_REG_NONE;
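On QUIRK_PIN_SWIZZLED_PAGES platforms the pages of a tiled object are kept pinned so the bit-17 swizzle state can be fixed up reliably. The pin/unpin pair above runs while obj->tiling_mode still holds the old mode and args->tiling_mode the new one, so the two ifs form a compact transition table. The same logic, with comments added:

    if (args->tiling_mode == I915_TILING_NONE)  /* new mode untiled:         */
            i915_gem_object_unpin_pages(obj);   /*   tiled -> untiled, unpin */
    if (obj->tiling_mode == I915_TILING_NONE)   /* old mode untiled:         */
            i915_gem_object_pin_pages(obj);     /*   untiled -> tiled, pin   */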
@@ -447,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        }
 
        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+       args->phys_swizzle_mode = args->swizzle_mode;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
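Reporting the raw value in args->phys_swizzle_mode before bit 17 is masked off lets userspace distinguish a clean 9/10 swizzle from a 9/10/17 layout that the kernel fixes up behind the scenes. On a bit-17 swizzling machine the ioctl now effectively returns (sketch):

    args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_9_10_17; /* the physical truth    */
    args->swizzle_mode      = I915_BIT_6_SWIZZLE_9_10;    /* what callers should   */
                                                          /* assume for CPU access */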
index 2c87a797213f4e94305be2846bd9a7caa5765d51..cdaee6ce05f84c57658a9e5755151bea8a6f7a60 100644 (file)
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
 
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
-                                 struct drm_i915_error_ring *ring)
+                                 struct drm_i915_error_state *error,
+                                 int ring_idx)
 {
+       struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
        if (!ring->valid)
                return;
 
+       err_printf(m, "%s command stream:\n", ring_str(ring_idx));
        err_printf(m, "  HEAD: 0x%08x\n", ring->head);
        err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
        err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               err_printf(m, "%s command stream:\n", ring_str(i));
-               i915_ring_error_state(m, dev, &error->ring[i]);
-       }
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+               i915_ring_error_state(m, dev, error, i);
 
        for (i = 0; i < error->vm_count; i++) {
                err_printf(m, "vm[%d]\n", i);
@@ -565,6 +567,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                         struct i915_address_space *vm)
 {
        struct drm_i915_error_object *dst;
+       struct i915_vma *vma = NULL;
        int num_pages;
        bool use_ggtt;
        int i = 0;
@@ -585,16 +588,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                dst->gtt_offset = -1;
 
        reloc_offset = dst->gtt_offset;
+       if (i915_is_ggtt(vm))
+               vma = i915_gem_obj_to_ggtt(src);
        use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-                   i915_is_ggtt(vm) &&
-                   src->has_global_gtt_mapping &&
-                   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+                  vma && (vma->bound & GLOBAL_BIND) &&
+                  reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
 
        /* Cannot access stolen address directly, try to use the aperture */
        if (src->stolen) {
                use_ggtt = true;
 
-               if (!src->has_global_gtt_mapping)
+               if (!(vma && vma->bound & GLOBAL_BIND))
                        goto unwind;
 
                reloc_offset = i915_gem_obj_ggtt_offset(src);
@@ -765,6 +769,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -804,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 
        if (!error->semaphore_obj)
                error->semaphore_obj =
-                       i915_error_object_create(dev_priv,
-                                                dev_priv->semaphore_obj,
-                                                &dev_priv->gtt.base);
+                       i915_error_ggtt_object_create(dev_priv,
+                                                     dev_priv->semaphore_obj);
 
        for_each_ring(to, dev_priv, i) {
                int idx;
@@ -923,6 +927,7 @@ static void i915_record_ring_state(struct drm_device *dev,
                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
                switch (INTEL_INFO(dev)->gen) {
+               case 9:
                case 8:
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
@@ -1238,7 +1243,8 @@ static void i915_error_capture_msg(struct drm_device *dev,
        ecode = i915_error_generate_code(dev_priv, error, &ring_id);
 
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
-                       "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+                       "GPU HANG: ecode %d:%d:0x%08x",
+                       INTEL_INFO(dev)->gen, ring_id, ecode);
 
        if (ring_id != -1 && error->ring[ring_id].pid != -1)
                len += scnprintf(error->error_msg + len,
@@ -1326,13 +1332,12 @@ void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
        if (error_priv->error)
                kref_get(&error_priv->error->ref);
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
 
 }
 
@@ -1346,12 +1351,11 @@ void i915_destroy_error_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       spin_lock_irq(&dev_priv->gpu_error.lock);
        error = dev_priv->gpu_error.first_error;
        dev_priv->gpu_error.first_error = NULL;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
 
        if (error)
                kref_put(&error->ref, i915_error_state_free);
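The spin_lock_irqsave() to spin_lock_irq() conversions in these hunks are safe because the affected paths only run in process context with interrupts known to be enabled; the save/restore variant is needed only when the caller might already have interrupts disabled. The distinction, as a generic sketch:

    /* Process context, irqs known to be enabled: */
    spin_lock_irq(&lock);                  /* disable irqs, take lock        */
    /* ... critical section ... */
    spin_unlock_irq(&lock);                /* unlock, unconditionally enable */

    /* Unknown context (irqs may already be off): */
    unsigned long flags;
    spin_lock_irqsave(&lock, flags);       /* save irq state, disable, lock  */
    /* ... critical section ... */
    spin_unlock_irqrestore(&lock, flags);  /* unlock, restore saved state    */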
@@ -1389,6 +1393,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
                WARN_ONCE(1, "Unsupported platform\n");
        case 7:
        case 8:
+       case 9:
                instdone[0] = I915_READ(GEN7_INSTDONE_1);
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
index 2e0613e262515f7d2ad6f8b75c4f0884bdf05333..176de6322e4d0039482422bc1210ff89a1af769b 100644 (file)
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
        [DRM_I915_ALLOC] = compat_i915_alloc
 };
 
-#ifdef CONFIG_COMPAT
 /**
  * Called whenever a 32-bit process running under a 64-bit kernel
  * performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        return ret;
 }
-#endif
index f66392b6e287c3036f420c022241a74108c65527..981834b0f9b6309b4f09911c4406a58fadbe0287 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling
+ * interrupt handling. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
 static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
 
 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
-       I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
-       POSTING_READ(GEN8_##type##_IER(which)); \
+       I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+       POSTING_READ(GEN8_##type##_IMR(which)); \
 } while (0)
 
 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
-       I915_WRITE(type##IMR, (imr_val)); \
        I915_WRITE(type##IER, (ier_val)); \
-       POSTING_READ(type##IER); \
+       I915_WRITE(type##IMR, (imr_val)); \
+       POSTING_READ(type##IMR); \
 } while (0)
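Both macros were reordered to program IER before IMR, with the posting read moved to the final write: IMR keeps everything masked until the enable register is fully set up, so no interrupt can be delivered through a half-initialized bank. The general shape, with hypothetical write_reg()/read_reg() accessors:

    /* Sketch: bring up one interrupt bank without a race window. */
    static void irq_bank_init(u32 ier_val, u32 imr_val)
    {
            WARN_ON(read_reg(IIR) != 0); /* no stale events allowed         */
            write_reg(IER, ier_val);     /* select interrupt sources first  */
            write_reg(IMR, imr_val);     /* unmask last                     */
            read_reg(IMR);               /* posting read flushes the writes */
    }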
 
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+
 /* For display hotplug interrupt */
-static void
+void
 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
@@ -146,7 +156,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
        }
 }
 
-static void
+void
 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
@@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
        ilk_update_gt_irq(dev_priv, mask, 0);
 }
 
-/**
-  * snb_update_pm_irq - update GEN6_PMIMR
-  * @dev_priv: driver private
-  * @interrupt_mask: mask of interrupt bits to update
-  * @enabled_irq_mask: mask of interrupt bits to enable
-  */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
-                             uint32_t interrupt_mask,
-                             uint32_t enabled_irq_mask)
-{
-       uint32_t new_val;
-
-       assert_spin_locked(&dev_priv->irq_lock);
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return;
-
-       new_val = dev_priv->pm_irq_mask;
-       new_val &= ~interrupt_mask;
-       new_val |= (~enabled_irq_mask & interrupt_mask);
-
-       if (new_val != dev_priv->pm_irq_mask) {
-               dev_priv->pm_irq_mask = new_val;
-               I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
-               POSTING_READ(GEN6_PMIMR);
-       }
-}
-
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
 {
-       snb_update_pm_irq(dev_priv, mask, mask);
+       return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
 {
-       snb_update_pm_irq(dev_priv, mask, 0);
+       return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
 }
 
-static bool ivb_can_enable_err_int(struct drm_device *dev)
+static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc;
-       enum pipe pipe;
-
-       assert_spin_locked(&dev_priv->irq_lock);
-
-       for_each_pipe(dev_priv, pipe) {
-               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-               if (crtc->cpu_fifo_underrun_disabled)
-                       return false;
-       }
-
-       return true;
+       return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
 }
 
 /**
-  * bdw_update_pm_irq - update GT interrupt 2
+  * snb_update_pm_irq - update GEN6_PMIMR
   * @dev_priv: driver private
   * @interrupt_mask: mask of interrupt bits to update
   * @enabled_irq_mask: mask of interrupt bits to enable
-  *
-  * Copied from the snb function, updated with relevant register offsets
   */
-static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
 {
@@ -264,144 +231,87 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return;
-
        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);
 
        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
-               I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
-               POSTING_READ(GEN8_GT_IMR(2));
+               I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+               POSTING_READ(gen6_pm_imr(dev_priv));
        }
 }
 
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
-       bdw_update_pm_irq(dev_priv, mask, mask);
-}
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+               return;
 
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-{
-       bdw_update_pm_irq(dev_priv, mask, 0);
+       snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+                                 uint32_t mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-       struct intel_crtc *crtc;
-
-       assert_spin_locked(&dev_priv->irq_lock);
-
-       for_each_pipe(dev_priv, pipe) {
-               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-               if (crtc->pch_fifo_underrun_disabled)
-                       return false;
-       }
-
-       return true;
+       snb_update_pm_irq(dev_priv, mask, 0);
 }
 
-void i9xx_check_fifo_underruns(struct drm_device *dev)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
-       for_each_intel_crtc(dev, crtc) {
-               u32 reg = PIPESTAT(crtc->pipe);
-               u32 pipestat;
-
-               if (crtc->cpu_fifo_underrun_disabled)
-                       continue;
-
-               pipestat = I915_READ(reg) & 0xffff0000;
-               if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
-                       continue;
-
-               I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-               POSTING_READ(reg);
-
-               DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
-       }
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+               return;
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       __gen6_disable_pm_irq(dev_priv, mask);
 }
 
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
-                                            enum pipe pipe,
-                                            bool enable, bool old)
+void gen6_reset_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg = PIPESTAT(pipe);
-       u32 pipestat = I915_READ(reg) & 0xffff0000;
-
-       assert_spin_locked(&dev_priv->irq_lock);
+       uint32_t reg = gen6_pm_iir(dev_priv);
 
-       if (enable) {
-               I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-               POSTING_READ(reg);
-       } else {
-               if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
-                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
-       }
+       spin_lock_irq(&dev_priv->irq_lock);
+       I915_WRITE(reg, dev_priv->pm_rps_events);
+       I915_WRITE(reg, dev_priv->pm_rps_events);
+       POSTING_READ(reg);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
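The doubled IIR write above is deliberate: these IIR registers can hold a second event queued behind the visible one, so the first write retires the live bit and the second retires anything queued, matching the paranoia in the GEN5_IRQ_RESET-style macros. Annotated:

    I915_WRITE(reg, dev_priv->pm_rps_events); /* clear the visible events      */
    I915_WRITE(reg, dev_priv->pm_rps_events); /* clear a queued second event   */
    POSTING_READ(reg);                        /* flush before irqs are enabled */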
 
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                enum pipe pipe, bool enable)
+void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
-                                         DE_PIPEB_FIFO_UNDERRUN;
 
-       if (enable)
-               ironlake_enable_display_irq(dev_priv, bit);
-       else
-               ironlake_disable_display_irq(dev_priv, bit);
+       spin_lock_irq(&dev_priv->irq_lock);
+       WARN_ON(dev_priv->rps.pm_iir);
+       WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+       dev_priv->rps.interrupts_enabled = true;
+       gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 enum pipe pipe,
-                                                 bool enable, bool old)
+void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       if (enable) {
-               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
 
-               if (!ivb_can_enable_err_int(dev))
-                       return;
+       spin_lock_irq(&dev_priv->irq_lock);
+       dev_priv->rps.interrupts_enabled = false;
+       spin_unlock_irq(&dev_priv->irq_lock);
 
-               ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-       } else {
-               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       cancel_work_sync(&dev_priv->rps.work);
 
-               if (old &&
-                   I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
-                       DRM_ERROR("uncleared fifo underrun on pipe %c\n",
-                                 pipe_name(pipe));
-               }
-       }
-}
+       spin_lock_irq(&dev_priv->irq_lock);
 
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
+                  ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
 
-       assert_spin_locked(&dev_priv->irq_lock);
+       __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
+                               ~dev_priv->pm_rps_events);
+       I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+       I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
 
-       if (enable)
-               dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
-       else
-               dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
-       I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-       POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+       dev_priv->rps.pm_iir = 0;
+
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 /**
@@ -410,9 +320,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
  * @interrupt_mask: mask of interrupt bits to update
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
-static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-                                        uint32_t interrupt_mask,
-                                        uint32_t enabled_irq_mask)
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+                                 uint32_t interrupt_mask,
+                                 uint32_t enabled_irq_mask)
 {
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
@@ -426,160 +336,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
 }
-#define ibx_enable_display_interrupt(dev_priv, bits) \
-       ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
-       ibx_display_interrupt_update((dev_priv), (bits), 0)
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
-                                           enum transcoder pch_transcoder,
-                                           bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
-                      SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
-       if (enable)
-               ibx_enable_display_interrupt(dev_priv, bit);
-       else
-               ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
-                                           enum transcoder pch_transcoder,
-                                           bool enable, bool old)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (enable) {
-               I915_WRITE(SERR_INT,
-                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
-               if (!cpt_can_enable_serr_int(dev))
-                       return;
-
-               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-       } else {
-               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
-               if (old && I915_READ(SERR_INT) &
-                   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-                       DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
-                                 transcoder_name(pch_transcoder));
-               }
-       }
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pipe: pipe
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable CPU fifo underruns for a specific
- * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
- * reporting for one pipe may also disable all the other CPU error interruts for
- * the other pipes, due to the fact that there's just one interrupt mask/enable
- * bit for all the pipes.
- *
- * Returns the previous state of underrun reporting.
- */
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                                   enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       bool old;
-
-       assert_spin_locked(&dev_priv->irq_lock);
-
-       old = !intel_crtc->cpu_fifo_underrun_disabled;
-       intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
-       if (HAS_GMCH_DISPLAY(dev))
-               i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN5(dev) || IS_GEN6(dev))
-               ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-       else if (IS_GEN7(dev))
-               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN8(dev))
-               broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
-       return old;
-}
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                          enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-       bool ret;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return ret;
-}
-
-static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
-                                                 enum pipe pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable PCH fifo underruns for a specific
- * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interruts for the other transcoders, due to the fact that there's just
- * one interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-                                          enum transcoder pch_transcoder,
-                                          bool enable)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
-       bool old;
-
-       /*
-        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
-        * has only one pch transcoder A that all pipes can use. To avoid racy
-        * pch transcoder -> pipe lookups from interrupt code simply store the
-        * underrun statistics in crtc A. Since we never expose this anywhere
-        * nor use it outside of the fifo underrun code here using the "wrong"
-        * crtc on LPT won't cause issues.
-        */
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
-       old = !intel_crtc->pch_fifo_underrun_disabled;
-       intel_crtc->pch_fifo_underrun_disabled = !enable;
-
-       if (HAS_PCH_IBX(dev))
-               ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
-       else
-               cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-       return old;
-}
-
 
 static void
 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -589,6 +345,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
+       WARN_ON(!intel_irqs_enabled(dev_priv));
 
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +372,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
+       WARN_ON(!intel_irqs_enabled(dev_priv));
 
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +452,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 static void i915_enable_asle_pipestat(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
 
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
 
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 /**
@@ -1094,18 +851,17 @@ static void i915_digport_work_func(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
-       unsigned long irqflags;
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i, ret;
        u32 old_bits = 0;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
@@ -1130,9 +886,9 @@ static void i915_digport_work_func(struct work_struct *work)
        }
 
        if (old_bits) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hpd_event_bits |= old_bits;
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug_work);
        }
 }
@@ -1151,7 +907,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
-       unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;
@@ -1159,7 +914,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
 
        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
@@ -1193,7 +948,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
@@ -1260,11 +1015,7 @@ static void notify_ring(struct drm_device *dev,
 
        trace_i915_gem_request_complete(ring);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               intel_notify_mmio_flip(ring);
-
        wake_up_all(&ring->irq_queue);
-       i915_queue_hangcheck(dev);
 }
 
 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1400,14 +1151,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
        int new_delay, adj;
 
        spin_lock_irq(&dev_priv->irq_lock);
+       /* Speed up work cancellation when disabling RPS interrupts. */
+       if (!dev_priv->rps.interrupts_enabled) {
+               spin_unlock_irq(&dev_priv->irq_lock);
+               return;
+       }
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
-       if (INTEL_INFO(dev_priv->dev)->gen >= 8)
-               gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-       else {
-               /* Make sure not to corrupt PMIMR state used by ringbuffer */
-               gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-       }
+       /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+       gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* Make sure we didn't queue anything we're not going to process. */
@@ -1488,7 +1240,6 @@ static void ivybridge_parity_work(struct work_struct *work)
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
-       unsigned long flags;
        uint8_t slice = 0;
 
        /* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1298,9 @@ static void ivybridge_parity_work(struct work_struct *work)
 
 out:
        WARN_ON(dev_priv->l3_parity.which_slice);
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
@@ -1601,28 +1352,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
-                     GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
-               i915_handle_error(dev, false, "GT error interrupt 0x%08x",
-                                 gt_iir);
-       }
+                     GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+               DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
-static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
-       if ((pm_iir & dev_priv->pm_rps_events) == 0)
-               return;
-
-       spin_lock(&dev_priv->irq_lock);
-       dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-       gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-       spin_unlock(&dev_priv->irq_lock);
-
-       queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                       struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
@@ -1684,7 +1420,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                        I915_WRITE(GEN8_GT_IIR(2),
                                   tmp & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
-                       gen8_rps_irq_handler(dev_priv, tmp);
+                       gen6_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }
@@ -1898,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 
        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
-               DRM_ERROR("spurious interrupt\n");
+               DRM_DEBUG_KMS("spurious interrupt\n");
                return;
        }
 
@@ -1984,24 +1720,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+       /* TODO: RPS on GEN9+ is not supported yet. */
+       if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+                     "GEN9+: unexpected RPS IRQ\n"))
+               return;
+
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
-               dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+               if (dev_priv->rps.interrupts_enabled) {
+                       dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+                       queue_work(dev_priv->wq, &dev_priv->rps.work);
+               }
                spin_unlock(&dev_priv->irq_lock);
-
-               queue_work(dev_priv->wq, &dev_priv->rps.work);
        }
 
+       if (INTEL_INFO(dev_priv)->gen >= 8)
+               return;
+
        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
-               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-                       i915_handle_error(dev_priv->dev, false,
-                                         "VEBOX CS error interrupt 0x%08x",
-                                         pm_iir);
-               }
+               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+                       DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
        }
 }
 
@@ -2031,9 +1773,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */
-               mask = 0;
-               if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
-                       mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+               /* fifo underruns are filtered in the underrun handler. */
+               mask = PIPE_FIFO_UNDERRUN_STATUS;
 
                switch (pipe) {
                case PIPE_A:
@@ -2078,9 +1820,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev, pipe);
 
-               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
-                   intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+                       intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }
 
        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2247,14 +1988,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-                                                         false))
-                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
+               intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-                                                         false))
-                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
+               intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 }
 
 static void ivb_err_int_handler(struct drm_device *dev)
@@ -2267,12 +2004,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
                DRM_ERROR("Poison interrupt\n");
 
        for_each_pipe(dev_priv, pipe) {
-               if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
-                       if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-                                                                 false))
-                               DRM_ERROR("Pipe %c FIFO underrun\n",
-                                         pipe_name(pipe));
-               }
+               if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
+                       intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
                        if (IS_IVYBRIDGE(dev))
@@ -2294,19 +2027,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
                DRM_ERROR("PCH poison interrupt\n");
 
        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
-               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-                                                         false))
-                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
+               intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
-               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-                                                         false))
-                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
+               intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 
        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
-               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
-                                                         false))
-                       DRM_ERROR("PCH transcoder C FIFO underrun\n");
+               intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
 
        I915_WRITE(SERR_INT, serr_int);
 }
@@ -2372,9 +2099,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
                        intel_check_page_flip(dev, pipe);
 
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
-                       if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_ERROR("Pipe %c FIFO underrun\n",
-                                         pipe_name(pipe));
+                       intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
                if (de_iir & DE_PIPE_CRC_DONE(pipe))
                        i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2524,6 +2249,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        irqreturn_t ret = IRQ_NONE;
        uint32_t tmp = 0;
        enum pipe pipe;
+       u32 aux_mask = GEN8_AUX_CHANNEL_A;
+
+       if (IS_GEN9(dev))
+               aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+                       GEN9_AUX_CHANNEL_D;
 
        master_ctl = I915_READ(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2556,7 +2286,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                if (tmp) {
                        I915_WRITE(GEN8_DE_PORT_IIR, tmp);
                        ret = IRQ_HANDLED;
-                       if (tmp & GEN8_AUX_CHANNEL_A)
+
+                       if (tmp & aux_mask)
                                dp_aux_irq_handler(dev);
                        else
                                DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -2566,7 +2297,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        }
 
        for_each_pipe(dev_priv, pipe) {
-               uint32_t pipe_iir;
+               uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
 
                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
                        continue;
@@ -2575,11 +2306,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                if (pipe_iir) {
                        ret = IRQ_HANDLED;
                        I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
                        if (pipe_iir & GEN8_PIPE_VBLANK &&
                            intel_pipe_handle_vblank(dev, pipe))
                                intel_check_page_flip(dev, pipe);
 
-                       if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+                       if (IS_GEN9(dev))
+                               flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+                       else
+                               flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+                       if (flip_done) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip_plane(dev, pipe);
                        }
@@ -2587,18 +2324,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                        if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
                                hsw_pipe_crc_irq_handler(dev, pipe);
 
-                       if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
-                               if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-                                                                         false))
-                                       DRM_ERROR("Pipe %c FIFO underrun\n",
-                                                 pipe_name(pipe));
-                       }
+                       if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
+                               intel_cpu_fifo_underrun_irq_handler(dev_priv,
+                                                                   pipe);
+
 
-                       if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+                       if (IS_GEN9(dev))
+                               fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+                       else
+                               fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+                       if (fault_errors)
                                DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
                                          pipe_name(pipe),
                                          pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-                       }
                } else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }
@@ -2697,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
                 * simulated reset via debugs, so get an RPM reference.
                 */
                intel_runtime_pm_get(dev_priv);
+
+               intel_prepare_reset(dev);
+
                /*
                 * All state reset _must_ be completed before we update the
                 * reset counter, for otherwise waiters might miss the reset
@@ -2705,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
                 */
                ret = i915_reset(dev);
 
-               intel_display_handle_reset(dev);
+               intel_finish_reset(dev);
 
                intel_runtime_pm_put(dev_priv);
 
@@ -3330,10 +3072,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
 void i915_queue_hangcheck(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+
        if (!i915.enable_hangcheck)
                return;
 
-       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+       /* Don't continually defer the hangcheck, but make sure it is active */
+       if (timer_pending(timer))
+               return;
+       mod_timer(timer,
                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
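Previously every call pushed the timer's deadline out again, so a steady stream of requests could defer the hangcheck indefinitely; with the timer_pending() test the timer is armed once and left to fire. The general pattern, using the plain kernel timer API:

    /* Sketch: arm a check without letting frequent callers starve it. */
    static void queue_check(struct timer_list *t, unsigned long delay)
    {
            if (timer_pending(t))
                    return;  /* already armed: keep the earlier deadline */
            mod_timer(t, round_jiffies_up(jiffies + delay));
    }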
 
@@ -3396,10 +3143,22 @@ static void ironlake_irq_reset(struct drm_device *dev)
        ibx_irq_reset(dev);
 }
 
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+       enum pipe pipe;
+
+       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+       for_each_pipe(dev_priv, pipe)
+               I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+       GEN5_IRQ_RESET(VLV_);
+}
+
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
 
        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
@@ -3407,22 +3166,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
 
-       /* and GT */
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-
        gen5_gt_irq_reset(dev);
 
-       I915_WRITE(DPINVGTT, 0xff);
+       I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
-       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IMR, 0xffffffff);
-       I915_WRITE(VLV_IER, 0x0);
-       POSTING_READ(VLV_IER);
+       vlv_display_irq_reset(dev_priv);
 }
 
 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3444,8 +3192,8 @@ static void gen8_irq_reset(struct drm_device *dev)
        gen8_gt_irq_reset(dev_priv);
 
        for_each_pipe(dev_priv, pipe)
-               if (intel_display_power_enabled(dev_priv,
-                                               POWER_DOMAIN_PIPE(pipe)))
+               if (intel_display_power_is_enabled(dev_priv,
+                                                  POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
        GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,21 +3205,19 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
-       unsigned long irqflags;
        uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
                          ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
                          ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
 
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
@@ -3480,20 +3226,9 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
 
        GEN5_IRQ_RESET(GEN8_PCU_);
 
-       POSTING_READ(GEN8_PCU_IIR);
-
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
-       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
-
-       I915_WRITE(VLV_IMR, 0xffffffff);
-       I915_WRITE(VLV_IER, 0x0);
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       POSTING_READ(VLV_IIR);
+       vlv_display_irq_reset(dev_priv);
 }
 
 static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3584,7 +3319,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
-       unsigned long irqflags;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 display_mask, extra_mask;
 
@@ -3623,9 +3357,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                 * spinlocking not required here for correctness since interrupt
                 * setup is guaranteed to run in single-threaded context. But we
                 * need it to make the assert_spin_locked happy. */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        return 0;
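The locking in the hunk above exists, as the comment says, only to satisfy the assert_spin_locked() checks in the callees; interrupt setup itself is single-threaded. The underlying convention is that any helper touching irq_mask documents its locking contract with an assertion, roughly like this (sketch; the helper name is hypothetical, the field names are those used elsewhere in this file):

	static void enable_display_irq_locked(struct drm_i915_private *dev_priv,
					      u32 bits)
	{
		/* Caller must hold irq_lock: the assertion turns a silent
		 * race into an immediate, debuggable failure. */
		assert_spin_locked(&dev_priv->irq_lock);
		dev_priv->irq_mask &= ~bits;
	}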
@@ -3635,45 +3369,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
 {
        u32 pipestat_mask;
        u32 iir_mask;
+       enum pipe pipe;
 
        pipestat_mask = PIPESTAT_INT_STATUS_MASK |
                        PIPE_FIFO_UNDERRUN_STATUS;
 
-       I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
-       I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+       for_each_pipe(dev_priv, pipe)
+               I915_WRITE(PIPESTAT(pipe), pipestat_mask);
        POSTING_READ(PIPESTAT(PIPE_A));
 
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
 
-       i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
-                                              PIPE_GMBUS_INTERRUPT_STATUS);
-       i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+       for_each_pipe(dev_priv, pipe)
+               i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
 
        iir_mask = I915_DISPLAY_PORT_INTERRUPT |
                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+       if (IS_CHERRYVIEW(dev_priv))
+               iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
        dev_priv->irq_mask &= ~iir_mask;
 
        I915_WRITE(VLV_IIR, iir_mask);
        I915_WRITE(VLV_IIR, iir_mask);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-       POSTING_READ(VLV_IER);
+       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+       POSTING_READ(VLV_IMR);
 }
 
 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
 {
        u32 pipestat_mask;
        u32 iir_mask;
+       enum pipe pipe;
 
        iir_mask = I915_DISPLAY_PORT_INTERRUPT |
                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+       if (IS_CHERRYVIEW(dev_priv))
+               iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
 
        dev_priv->irq_mask |= iir_mask;
-       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, iir_mask);
        I915_WRITE(VLV_IIR, iir_mask);
        POSTING_READ(VLV_IIR);
@@ -3681,14 +3421,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
 
-       i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
-                                               PIPE_GMBUS_INTERRUPT_STATUS);
-       i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+       i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+       for_each_pipe(dev_priv, pipe)
+               i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
 
        pipestat_mask = PIPESTAT_INT_STATUS_MASK |
                        PIPE_FIFO_UNDERRUN_STATUS;
-       I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
-       I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+
+       for_each_pipe(dev_priv, pipe)
+               I915_WRITE(PIPESTAT(pipe), pipestat_mask);
        POSTING_READ(PIPESTAT(PIPE_A));
 }
 
@@ -3701,7 +3442,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 
        dev_priv->display_irqs_enabled = true;
 
-       if (dev_priv->dev->irq_enabled)
+       if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_install(dev_priv);
 }
 
@@ -3714,34 +3455,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
        dev_priv->display_irqs_enabled = false;
 
-       if (dev_priv->dev->irq_enabled)
+       if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_uninstall(dev_priv);
 }
 
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
-
        dev_priv->irq_mask = ~0;
 
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
-       POSTING_READ(VLV_IER);
+       I915_WRITE(VLV_IIR, 0xffffffff);
+       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+       POSTING_READ(VLV_IMR);
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_install(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
 
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IIR, 0xffffffff);
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       vlv_display_irq_postinstall(dev_priv);
 
        gen5_gt_irq_postinstall(dev);
 
@@ -3783,24 +3526,35 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
-               GEN8_PIPE_CDCLK_CRC_DONE |
-               GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-       uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
-               GEN8_PIPE_FIFO_UNDERRUN;
+       uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       uint32_t de_pipe_enables;
        int pipe;
+       u32 aux_en = GEN8_AUX_CHANNEL_A;
+
+       if (IS_GEN9(dev_priv)) {
+               de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+                                 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+               aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+                       GEN9_AUX_CHANNEL_D;
+       } else
+               de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+                                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+       de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+                                          GEN8_PIPE_FIFO_UNDERRUN;
+
        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
        for_each_pipe(dev_priv, pipe)
-               if (intel_display_power_enabled(dev_priv,
+               if (intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
                                          dev_priv->de_irq_mask[pipe],
                                          de_pipe_enables);
 
-       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
+       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3823,33 +3577,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
-               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-       u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
-               PIPE_CRC_DONE_INTERRUPT_STATUS;
-       unsigned long irqflags;
-       int pipe;
 
-       /*
-        * Leave vblank interrupts masked initially.  enable/disable will
-        * toggle them based on usage.
-        */
-       dev_priv->irq_mask = ~enable_mask;
-
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-       for_each_pipe(dev_priv, pipe)
-               i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IER, enable_mask);
+       vlv_display_irq_postinstall(dev_priv);
 
        gen8_gt_irq_postinstall(dev_priv);
 
@@ -3869,41 +3598,39 @@ static void gen8_irq_uninstall(struct drm_device *dev)
        gen8_irq_reset(dev);
 }
 
+static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               valleyview_display_irqs_uninstall(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       vlv_display_irq_reset(dev_priv);
+
+       dev_priv->irq_mask = 0;
+}
+
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
-       int pipe;
 
        if (!dev_priv)
                return;
 
        I915_WRITE(VLV_MASTER_IER, 0);
 
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
+       gen5_gt_irq_reset(dev);
 
        I915_WRITE(HWSTAM, 0xffffffff);
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
-       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       if (dev_priv->display_irqs_enabled)
-               valleyview_display_irqs_uninstall(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
-       dev_priv->irq_mask = 0;
-
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IMR, 0xffffffff);
-       I915_WRITE(VLV_IER, 0x0);
-       POSTING_READ(VLV_IER);
+       vlv_display_irq_uninstall(dev_priv);
 }
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
 
        if (!dev_priv)
                return;
@@ -3911,44 +3638,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
 
-#define GEN8_IRQ_FINI_NDX(type, which)                         \
-do {                                                           \
-       I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);       \
-       I915_WRITE(GEN8_##type##_IER(which), 0);                \
-       I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
-       POSTING_READ(GEN8_##type##_IIR(which));                 \
-       I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
-} while (0)
-
-#define GEN8_IRQ_FINI(type)                            \
-do {                                                   \
-       I915_WRITE(GEN8_##type##_IMR, 0xffffffff);      \
-       I915_WRITE(GEN8_##type##_IER, 0);               \
-       I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
-       POSTING_READ(GEN8_##type##_IIR);                \
-       I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
-} while (0)
-
-       GEN8_IRQ_FINI_NDX(GT, 0);
-       GEN8_IRQ_FINI_NDX(GT, 1);
-       GEN8_IRQ_FINI_NDX(GT, 2);
-       GEN8_IRQ_FINI_NDX(GT, 3);
-
-       GEN8_IRQ_FINI(PCU);
-
-#undef GEN8_IRQ_FINI
-#undef GEN8_IRQ_FINI_NDX
-
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
-       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+       gen8_gt_irq_reset(dev_priv);
 
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
+       GEN5_IRQ_RESET(GEN8_PCU_);
 
-       I915_WRITE(VLV_IMR, 0xffffffff);
-       I915_WRITE(VLV_IER, 0x0);
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       POSTING_READ(VLV_IIR);
+       vlv_display_irq_uninstall(dev_priv);
 }
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3976,7 +3670,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
 
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +3692,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        return 0;
 }
@@ -4047,7 +3740,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 iir, new_iir;
        u32 pipe_stats[2];
-       unsigned long irqflags;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,11 +3755,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false,
-                                         "Command parser error, iir 0x%08x",
-                                         iir);
+                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
                for_each_pipe(dev_priv, pipe) {
                        int reg = PIPESTAT(pipe);
@@ -4079,13 +3769,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                I915_WRITE16(IIR, iir & ~flip_mask);
                new_iir = I915_READ16(IIR); /* Flush posted writes */
 
-               i915_update_dri1_breadcrumb(dev);
-
                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
 
@@ -4101,9 +3789,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 
-                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
-                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+                               intel_cpu_fifo_underrun_irq_handler(dev_priv,
+                                                                   pipe);
                }
 
                iir = new_iir;
@@ -4149,7 +3837,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
-       unsigned long irqflags;
 
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -4187,10 +3874,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        return 0;
 }
@@ -4234,7 +3921,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
-       unsigned long irqflags;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,11 +3936,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false,
-                                         "Command parser error, iir 0x%08x",
-                                         iir);
+                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
                for_each_pipe(dev_priv, pipe) {
                        int reg = PIPESTAT(pipe);
@@ -4266,7 +3950,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                                irq_received = true;
                        }
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                if (!irq_received)
                        break;
@@ -4297,9 +3981,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 
-                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
-                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+                               intel_cpu_fifo_underrun_irq_handler(dev_priv,
+                                                                   pipe);
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4324,8 +4008,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                iir = new_iir;
        } while (iir & ~flip_mask);
 
-       i915_update_dri1_breadcrumb(dev);
-
        return ret;
 }
 
@@ -4372,7 +4054,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
        u32 error_mask;
-       unsigned long irqflags;
 
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4074,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        /*
         * Enable some error detection, note the instruction error mask
@@ -4462,7 +4143,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
-       unsigned long irqflags;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,11 +4159,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       i915_handle_error(dev, false,
-                                         "Command parser error, iir 0x%08x",
-                                         iir);
+                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
                for_each_pipe(dev_priv, pipe) {
                        int reg = PIPESTAT(pipe);
@@ -4497,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                                irq_received = true;
                        }
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                if (!irq_received)
                        break;
@@ -4527,9 +4205,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 
-                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
-                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+                               intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4556,8 +4233,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                iir = new_iir;
        }
 
-       i915_update_dri1_breadcrumb(dev);
-
        return ret;
 }
 
@@ -4584,19 +4259,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(struct work_struct *work)
+static void intel_hpd_irq_reenable_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug_reenable_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       unsigned long irqflags;
        int i;
 
        intel_runtime_pm_get(dev_priv);
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;
 
@@ -4620,14 +4294,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        intel_runtime_pm_put(dev_priv);
 }
 
-void intel_irq_init(struct drm_device *dev)
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support, including work items, timers
+ * and all the vtables. It does not set up the interrupt itself, though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
 
        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4317,7 @@ void intel_irq_init(struct drm_device *dev)
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
        /* Let's track the enabled rps events */
-       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
@@ -4646,17 +4327,14 @@ void intel_irq_init(struct drm_device *dev)
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
        INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-                         intel_hpd_irq_reenable);
+                         intel_hpd_irq_reenable_work);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-       /* Haven't installed the IRQ handler yet */
-       dev_priv->pm._irqs_disabled = true;
-
-       if (IS_GEN2(dev)) {
+       if (IS_GEN2(dev_priv)) {
                dev->max_vblank_count = 0;
                dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
-       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        } else {
@@ -4669,7 +4347,7 @@ void intel_irq_init(struct drm_device *dev)
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
-       if (!IS_GEN2(dev))
+       if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4355,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
        }
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_preinstall;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4363,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_VALLEYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_GEN8(dev)) {
+       } else if (INTEL_INFO(dev_priv)->gen >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4388,12 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
-               if (INTEL_INFO(dev)->gen == 2) {
+               if (INTEL_INFO(dev_priv)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
-               } else if (INTEL_INFO(dev)->gen == 3) {
+               } else if (INTEL_INFO(dev_priv)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4411,23 @@ void intel_irq_init(struct drm_device *dev)
        }
 }
 
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_install(). From this point on, hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
-       unsigned long irqflags;
        int i;
 
        for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4445,72 @@ void intel_hpd_init(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-/* Disable interrupts so we can allow runtime PM. */
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves hotplug
+ * handling disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /*
+        * We enable some interrupt sources in our postinstall hooks, so mark
+        * interrupts as enabled _before_ actually enabling them to avoid
+        * special cases in our ordering checks.
+        */
+       dev_priv->pm.irqs_enabled = true;
 
-       dev->driver->irq_uninstall(dev);
-       dev_priv->pm._irqs_disabled = true;
+       return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
 }
 
-/* Restore interrupts so we can recover from runtime PM. */
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_irq_uninstall(dev_priv->dev);
+       intel_hpd_cancel_work(dev_priv);
+       dev_priv->pm.irqs_enabled = false;
+}
 
-       dev_priv->pm._irqs_disabled = false;
-       dev->driver->irq_preinstall(dev);
-       dev->driver->irq_postinstall(dev);
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+       dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+       dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+       dev_priv->pm.irqs_enabled = true;
+       dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+       dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
 }
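Read together, the kernel-doc above pins down a fixed bring-up order for the split irq setup: software state first, then the hardware interrupt, then hotplug. A sketch of a load path following those rules (hypothetical function; error unwinding elided):

	static int i915_irq_bringup(struct drm_i915_private *dev_priv)
	{
		int ret;

		/* Software-only setup: work items, timers, vtables. */
		intel_irq_init(dev_priv);

		/* Enable the hardware interrupt; hotplug stays off. */
		ret = intel_irq_install(dev_priv);
		if (ret)
			return ret;

		/* Hotplug last: it requires live interrupts. */
		intel_hpd_init(dev_priv);
		return 0;
	}

Suspend/resume then brackets the same state with intel_runtime_pm_disable_interrupts() and intel_runtime_pm_enable_interrupts() rather than tearing the whole setup down.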
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430ec420f469aacda6c520b39ccb158e..544675895c8dff93f6b5cdf3eb757e5643467f33 100644
@@ -26,8 +26,8 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
-
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
                               (pipe) == PIPE_B ? (b) : (c))
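These helpers all encode the same assumption: per-instance register banks laid out at a constant stride, so instance n lives at a + n * (b - a). A worked instance using the SKL cursor watermark bases defined further down in this file (CUR_WM_A_0 = 0x70140, CUR_WM_B_0 = 0x71140):

	_PIPE(PIPE_A, CUR_WM_A_0, CUR_WM_B_0)
		= 0x70140 + 0 * (0x71140 - 0x70140) = 0x70140
	_PIPE(PIPE_B, CUR_WM_A_0, CUR_WM_B_0)
		= 0x70140 + 1 * 0x1000              = 0x71140

_PIPE3() covers the cases where three instances are not evenly spaced and the stride arithmetic cannot be used.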
 
 
 /* Graphics reset regs */
-#define I965_GDRST 0xc0 /* PCI config register */
+#define I915_GDRST 0xc0 /* PCI config register */
 #define  GRDOM_FULL    (0<<2)
 #define  GRDOM_RENDER  (1<<2)
 #define  GRDOM_MEDIA   (3<<2)
 #define  GRDOM_MASK    (3<<2)
+#define  GRDOM_RESET_STATUS (1<<1)
 #define  GRDOM_RESET_ENABLE (1<<0)
 
 #define ILK_GDSR 0x2ca4 /* MCHBAR offset */
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_A        (0 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_B        (1 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_C        (2 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_A        (4 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_B        (5 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_C        (6 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_A        (7 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_B        (8 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_C        (9 << 8)
 #define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6, gen7 */
 #define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define   MI_SEMAPHORE_UPDATE      (1<<21)
 #define   MI_BATCH_GTT             (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8     MI_INSTR(0x31, 1)
 
+#define MI_PREDICATE_SRC0      (0x2400)
+#define MI_PREDICATE_SRC1      (0x2408)
 
 #define MI_PREDICATE_RESULT_2  (0x2214)
 #define  LOWER_SLICE_ENABLED   (1<<0)
@@ -564,6 +577,7 @@ enum punit_power_well {
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
+#define   GPLLENABLE                           (1<<4)
 #define   GENFREQSTATUS                                (1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ         0xdc
 #define PUNIT_REG_CZ_TIMESTAMP                 0xce
@@ -672,7 +686,7 @@ enum punit_power_well {
  * need to be accessed during AUX communication,
  *
  * Generally the common lane corresponds to the pipe and
- * the spline (PCS/TX) correponds to the port.
+ * the spline (PCS/TX) corresponds to the port.
  *
  * For dual channel PHY (VLV/CHV):
  *
@@ -796,6 +810,8 @@ enum punit_power_well {
 #define _VLV_PCS_DW0_CH1               0x8400
 #define   DPIO_PCS_TX_LANE2_RESET      (1<<16)
 #define   DPIO_PCS_TX_LANE1_RESET      (1<<7)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1<<3)
 #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
 #define _VLV_PCS01_DW0_CH0             0x200
@@ -836,12 +852,31 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW9_CH0               0x8224
 #define _VLV_PCS_DW9_CH1               0x8424
+#define   DPIO_PCS_TX2MARGIN_MASK      (0x7<<13)
+#define   DPIO_PCS_TX2MARGIN_000       (0<<13)
+#define   DPIO_PCS_TX2MARGIN_101       (1<<13)
+#define   DPIO_PCS_TX1MARGIN_MASK      (0x7<<10)
+#define   DPIO_PCS_TX1MARGIN_000       (0<<10)
+#define   DPIO_PCS_TX1MARGIN_101       (1<<10)
 #define        VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
+#define _VLV_PCS01_DW9_CH0             0x224
+#define _VLV_PCS23_DW9_CH0             0x424
+#define _VLV_PCS01_DW9_CH1             0x2624
+#define _VLV_PCS23_DW9_CH1             0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
 #define _CHV_PCS_DW10_CH0              0x8228
 #define _CHV_PCS_DW10_CH1              0x8428
 #define   DPIO_PCS_SWING_CALC_TX0_TX2  (1<<30)
 #define   DPIO_PCS_SWING_CALC_TX1_TX3  (1<<31)
+#define   DPIO_PCS_TX2DEEMP_MASK       (0xf<<24)
+#define   DPIO_PCS_TX2DEEMP_9P5                (0<<24)
+#define   DPIO_PCS_TX2DEEMP_6P0                (2<<24)
+#define   DPIO_PCS_TX1DEEMP_MASK       (0xf<<16)
+#define   DPIO_PCS_TX1DEEMP_9P5                (0<<16)
+#define   DPIO_PCS_TX1DEEMP_6P0                (2<<16)
 #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
 #define _VLV_PCS01_DW10_CH0            0x0228
@@ -853,8 +888,18 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW11_CH0              0x822c
 #define _VLV_PCS_DW11_CH1              0x842c
+#define   DPIO_LANEDESKEW_STRAP_OVRD   (1<<3)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1<<1)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
 #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
+#define _VLV_PCS01_DW11_CH0            0x022c
+#define _VLV_PCS23_DW11_CH0            0x042c
+#define _VLV_PCS01_DW11_CH1            0x262c
+#define _VLV_PCS23_DW11_CH1            0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
 #define _VLV_PCS_DW12_CH0              0x8230
 #define _VLV_PCS_DW12_CH1              0x8430
 #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -1999,6 +2044,8 @@ enum punit_power_well {
 #define DCC_ADDRESSING_MODE_MASK                       (3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE                                (1 << 10)
 #define DCC_CHANNEL_XOR_BIT_17                         (1 << 9)
+#define DCC2                   0x10204
+#define DCC2_MODIFIED_ENHANCED_DISABLE                 (1 << 20)
 
 /* Pineview MCH register contains DDR3 setting */
 #define CSHRDDR3CTL            0x101a8
@@ -2282,7 +2329,6 @@ enum punit_power_well {
 
 #define GEN6_GT_THREAD_STATUS_REG 0x13805c
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
-#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
 #define GEN6_GT_PERF_STATUS    (MCHBAR_MIRROR_BASE_SNB + 0x5948)
 #define GEN6_RP_STATE_LIMITS   (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -2506,9 +2552,7 @@ enum punit_power_well {
 
 #define EDP_PSR_AUX_CTL(dev)                   (EDP_PSR_BASE(dev) + 0x10)
 #define EDP_PSR_AUX_DATA1(dev)                 (EDP_PSR_BASE(dev) + 0x14)
-#define   EDP_PSR_DPCD_COMMAND         0x80060000
 #define EDP_PSR_AUX_DATA2(dev)                 (EDP_PSR_BASE(dev) + 0x18)
-#define   EDP_PSR_DPCD_NORMAL_OPERATION        (1<<24)
 #define EDP_PSR_AUX_DATA3(dev)                 (EDP_PSR_BASE(dev) + 0x1c)
 #define EDP_PSR_AUX_DATA4(dev)                 (EDP_PSR_BASE(dev) + 0x20)
 #define EDP_PSR_AUX_DATA5(dev)                 (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3689,7 @@ enum punit_power_well {
 #define   DP_AUX_CH_CTL_PRECHARGE_TEST     (1 << 11)
 #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
 #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+#define   DP_AUX_CH_CTL_SYNC_PULSE_SKL(c)   ((c) - 1)
 
 /*
  * Computing GMCH M and N values for the Display Port link
@@ -4024,17 +4069,18 @@ enum punit_power_well {
 #define   DSPFW_PLANEA_WM1_HI_MASK     (1<<0)
 
 /* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_16     16
 #define DRAIN_LATENCY_PRECISION_32     32
 #define DRAIN_LATENCY_PRECISION_64     64
 #define VLV_DDL(pipe)                  (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_64                (1<<31)
-#define DDL_CURSOR_PRECISION_32                (0<<31)
+#define DDL_CURSOR_PRECISION_HIGH      (1<<31)
+#define DDL_CURSOR_PRECISION_LOW       (0<<31)
 #define DDL_CURSOR_SHIFT               24
-#define DDL_SPRITE_PRECISION_64(sprite)        (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_32(sprite)        (0<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_HIGH(sprite)      (1<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_LOW(sprite)       (0<<(15+8*(sprite)))
 #define DDL_SPRITE_SHIFT(sprite)       (8+8*(sprite))
-#define DDL_PLANE_PRECISION_64         (1<<7)
-#define DDL_PLANE_PRECISION_32         (0<<7)
+#define DDL_PLANE_PRECISION_HIGH       (1<<7)
+#define DDL_PLANE_PRECISION_LOW                (0<<7)
 #define DDL_PLANE_SHIFT                        0
 #define DRAIN_LATENCY_MASK             0x7f
 
@@ -4071,6 +4117,41 @@ enum punit_power_well {
 #define I965_CURSOR_MAX_WM     32
 #define I965_CURSOR_DFT_WM     8
 
+/* Watermark register definitions for SKL */
+#define CUR_WM_A_0             0x70140
+#define CUR_WM_B_0             0x71140
+#define PLANE_WM_1_A_0         0x70240
+#define PLANE_WM_1_B_0         0x71240
+#define PLANE_WM_2_A_0         0x70340
+#define PLANE_WM_2_B_0         0x71340
+#define PLANE_WM_TRANS_1_A_0   0x70268
+#define PLANE_WM_TRANS_1_B_0   0x71268
+#define PLANE_WM_TRANS_2_A_0   0x70368
+#define PLANE_WM_TRANS_2_B_0   0x71368
+#define CUR_WM_TRANS_A_0       0x70168
+#define CUR_WM_TRANS_B_0       0x71168
+#define   PLANE_WM_EN          (1 << 31)
+#define   PLANE_WM_LINES_SHIFT 14
+#define   PLANE_WM_LINES_MASK  0x1f
+#define   PLANE_WM_BLOCKS_MASK 0x3ff
+
+#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
+#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+
+#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane)    \
+                       _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level)   \
+                       (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_TRANS_1(pipe)        \
+                       _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+#define _PLANE_WM_TRANS_2(pipe)        \
+                       _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+#define PLANE_WM_TRANS(pipe, plane)    \
+               _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK          0x45100
 #define  WM0_PIPE_PLANE_MASK   (0xffff<<16)
@@ -4177,6 +4258,7 @@ enum punit_power_well {
 #define   MCURSOR_PIPE_A       0x00
 #define   MCURSOR_PIPE_B       (1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define   CURSOR_ROTATE_180    (1<<15)
 #define   CURSOR_TRICKLE_FEED_DISABLE  (1 << 14)
 #define _CURABASE              0x70084
 #define _CURAPOS               0x70088
@@ -4240,9 +4322,11 @@ enum punit_power_well {
 #define   DISPPLANE_NO_LINE_DOUBLE             0
 #define   DISPPLANE_STEREO_POLARITY_FIRST      0
 #define   DISPPLANE_STEREO_POLARITY_SECOND     (1<<18)
-#define   DISPPLANE_ROTATE_180         (1<<15)
+#define   DISPPLANE_ALPHA_PREMULTIPLY          (1<<16) /* CHV pipe B */
+#define   DISPPLANE_ROTATE_180                 (1<<15)
 #define   DISPPLANE_TRICKLE_FEED_DISABLE       (1<<14) /* Ironlake */
 #define   DISPPLANE_TILED                      (1<<10)
+#define   DISPPLANE_MIRROR                     (1<<8) /* CHV pipe B */
 #define _DSPAADDR                              0x70184
 #define _DSPASTRIDE                            0x70188
 #define _DSPAPOS                               0x7018C /* reserved */
@@ -4263,6 +4347,24 @@ enum punit_power_well {
 #define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
 #define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
 
+/* CHV pipe B blender and primary plane */
+#define _CHV_BLEND_A           0x60a00
+#define   CHV_BLEND_LEGACY             (0<<30)
+#define   CHV_BLEND_ANDROID            (1<<30)
+#define   CHV_BLEND_MPO                        (2<<30)
+#define   CHV_BLEND_MASK               (3<<30)
+#define _CHV_CANVAS_A          0x60a04
+#define _PRIMPOS_A             0x60a08
+#define _PRIMSIZE_A            0x60a0c
+#define _PRIMCNSTALPHA_A       0x60a10
+#define   PRIM_CONST_ALPHA_ENABLE      (1<<31)
+
+#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK     (0xfffff000)
 #define I915_LO_DISPBASE(val)  (val & ~DISP_BASEADDR_MASK)
@@ -4464,6 +4566,7 @@ enum punit_power_well {
 #define   SP_FORMAT_RGBA1010102                (9<<26)
 #define   SP_FORMAT_RGBX8888           (0xe<<26)
 #define   SP_FORMAT_RGBA8888           (0xf<<26)
+#define   SP_ALPHA_PREMULTIPLY         (1<<23) /* CHV pipe B */
 #define   SP_SOURCE_KEY                        (1<<22)
 #define   SP_YUV_BYTE_ORDER_MASK       (3<<16)
 #define   SP_YUV_ORDER_YUYV            (0<<16)
@@ -4472,6 +4575,7 @@ enum punit_power_well {
 #define   SP_YUV_ORDER_VYUY            (3<<16)
 #define   SP_ROTATE_180                        (1<<15)
 #define   SP_TILED                     (1<<10)
+#define   SP_MIRROR                    (1<<8) /* CHV pipe B */
 #define _SPALINOFF             (VLV_DISPLAY_BASE + 0x72184)
 #define _SPASTRIDE             (VLV_DISPLAY_BASE + 0x72188)
 #define _SPAPOS                        (VLV_DISPLAY_BASE + 0x7218c)
@@ -4482,6 +4586,7 @@ enum punit_power_well {
 #define _SPAKEYMAXVAL          (VLV_DISPLAY_BASE + 0x721a0)
 #define _SPATILEOFF            (VLV_DISPLAY_BASE + 0x721a4)
 #define _SPACONSTALPHA         (VLV_DISPLAY_BASE + 0x721a8)
+#define   SP_CONST_ALPHA_ENABLE                (1<<31)
 #define _SPAGAMC               (VLV_DISPLAY_BASE + 0x721f4)
 
 #define _SPBCNTR               (VLV_DISPLAY_BASE + 0x72280)
@@ -4510,6 +4615,195 @@ enum punit_power_well {
 #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
 #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
 
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr|   |c0 c1 c2|   |cr + cr_ioff|   |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb|   |c6 c7 c8|   |cb + cr_ioff|   |cb_ooff|
+ */
+#define SPCSCYGOFF(sprite)     (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite)     (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite)     (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define  SPCSC_OOFF(x)         (((x) & 0x7ff) << 16) /* s11 */
+#define  SPCSC_IOFF(x)         (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCC01(sprite)       (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite)       (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite)       (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite)       (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite)                (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define  SPCSC_C1(x)           (((x) & 0x7fff) << 16) /* s3.12 */
+#define  SPCSC_C0(x)           (((x) & 0x7fff) << 0) /* s3.12 */
+
+#define SPCSCYGICLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define  SPCSC_IMAX(x)         (((x) & 0x7ff) << 16) /* s11 */
+#define  SPCSC_IMIN(x)         (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCYGOCLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite)  (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define  SPCSC_OMAX(x)         ((x) << 16) /* u10 */
+#define  SPCSC_OMIN(x)         ((x) << 0) /* u10 */
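Plugging numbers into the matrix equation above makes the register packing concrete. With the coefficients programmed as an s3.12 identity (0x1000 = 1.0 on the diagonal, zero elsewhere) and all offsets zero, each channel passes through unchanged. A sketch of how the first row would be written (hypothetical values, sprite 0; the remaining rows are analogous):

	/* c0 = 1.0 (0x1000 in s3.12), c1 = 0: identity first row */
	I915_WRITE(SPCSCC01(0), SPCSC_C1(0) | SPCSC_C0(0x1000));
	/* no input or output offset on the YG channel */
	I915_WRITE(SPCSCYGOFF(0), SPCSC_OOFF(0) | SPCSC_IOFF(0));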
+
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A                         0x70180
+#define _PLANE_CTL_2_A                         0x70280
+#define _PLANE_CTL_3_A                         0x70380
+#define   PLANE_CTL_ENABLE                     (1 << 31)
+#define   PLANE_CTL_PIPE_GAMMA_ENABLE          (1 << 30)
+#define   PLANE_CTL_FORMAT_MASK                        (0xf << 24)
+#define   PLANE_CTL_FORMAT_YUV422              (  0 << 24)
+#define   PLANE_CTL_FORMAT_NV12                        (  1 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_2101010                (  2 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_8888           (  4 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_16161616F      (  6 << 24)
+#define   PLANE_CTL_FORMAT_AYUV                        (  8 << 24)
+#define   PLANE_CTL_FORMAT_INDEXED             ( 12 << 24)
+#define   PLANE_CTL_FORMAT_RGB_565             ( 14 << 24)
+#define   PLANE_CTL_PIPE_CSC_ENABLE            (1 << 23)
+#define   PLANE_CTL_KEY_ENABLE_MASK            (0x3 << 21)
+#define   PLANE_CTL_KEY_ENABLE_SOURCE          (  1 << 21)
+#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (  2 << 21)
+#define   PLANE_CTL_ORDER_BGRX                 (0 << 20)
+#define   PLANE_CTL_ORDER_RGBX                 (1 << 20)
+#define   PLANE_CTL_YUV422_ORDER_MASK          (0x3 << 16)
+#define   PLANE_CTL_YUV422_YUYV                        (  0 << 16)
+#define   PLANE_CTL_YUV422_UYVY                        (  1 << 16)
+#define   PLANE_CTL_YUV422_YVYU                        (  2 << 16)
+#define   PLANE_CTL_YUV422_VYUY                        (  3 << 16)
+#define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
+#define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
+#define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13)
+#define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
+#define   PLANE_CTL_TILED_LINEAR               (  0 << 10)
+#define   PLANE_CTL_TILED_X                    (  1 << 10)
+#define   PLANE_CTL_TILED_Y                    (  4 << 10)
+#define   PLANE_CTL_TILED_YF                   (  5 << 10)
+#define   PLANE_CTL_ALPHA_MASK                 (0x3 << 4)
+#define   PLANE_CTL_ALPHA_DISABLE              (  0 << 4)
+#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (  2 << 4)
+#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (  3 << 4)
+#define   PLANE_CTL_ROTATE_MASK                        0x3
+#define   PLANE_CTL_ROTATE_0                   0x0
+#define   PLANE_CTL_ROTATE_180                 0x2
+#define _PLANE_STRIDE_1_A                      0x70188
+#define _PLANE_STRIDE_2_A                      0x70288
+#define _PLANE_STRIDE_3_A                      0x70388
+#define _PLANE_POS_1_A                         0x7018c
+#define _PLANE_POS_2_A                         0x7028c
+#define _PLANE_POS_3_A                         0x7038c
+#define _PLANE_SIZE_1_A                                0x70190
+#define _PLANE_SIZE_2_A                                0x70290
+#define _PLANE_SIZE_3_A                                0x70390
+#define _PLANE_SURF_1_A                                0x7019c
+#define _PLANE_SURF_2_A                                0x7029c
+#define _PLANE_SURF_3_A                                0x7039c
+#define _PLANE_OFFSET_1_A                      0x701a4
+#define _PLANE_OFFSET_2_A                      0x702a4
+#define _PLANE_OFFSET_3_A                      0x703a4
+#define _PLANE_KEYVAL_1_A                      0x70194
+#define _PLANE_KEYVAL_2_A                      0x70294
+#define _PLANE_KEYMSK_1_A                      0x70198
+#define _PLANE_KEYMSK_2_A                      0x70298
+#define _PLANE_KEYMAX_1_A                      0x701a0
+#define _PLANE_KEYMAX_2_A                      0x702a0
+#define _PLANE_BUF_CFG_1_A                     0x7027c
+#define _PLANE_BUF_CFG_2_A                     0x7037c
+
+#define _PLANE_CTL_1_B                         0x71180
+#define _PLANE_CTL_2_B                         0x71280
+#define _PLANE_CTL_3_B                         0x71380
+#define _PLANE_CTL_1(pipe)     _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe)     _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe)     _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane) \
+       _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+
+#define _PLANE_STRIDE_1_B                      0x71188
+#define _PLANE_STRIDE_2_B                      0x71288
+#define _PLANE_STRIDE_3_B                      0x71388
+#define _PLANE_STRIDE_1(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane)      \
+       _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B                         0x7118c
+#define _PLANE_POS_2_B                         0x7128c
+#define _PLANE_POS_3_B                         0x7138c
+#define _PLANE_POS_1(pipe)     _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe)     _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe)     _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane) \
+       _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B                                0x71190
+#define _PLANE_SIZE_2_B                                0x71290
+#define _PLANE_SIZE_3_B                                0x71390
+#define _PLANE_SIZE_1(pipe)    _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe)    _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe)    _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane)        \
+       _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B                                0x7119c
+#define _PLANE_SURF_2_B                                0x7129c
+#define _PLANE_SURF_3_B                                0x7139c
+#define _PLANE_SURF_1(pipe)    _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe)    _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe)    _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane)        \
+       _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B                      0x711a4
+#define _PLANE_OFFSET_2_B                      0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane)      \
+       _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B                      0x71194
+#define _PLANE_KEYVAL_2_B                      0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B                      0x71198
+#define _PLANE_KEYMSK_2_B                      0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B                      0x711a0
+#define _PLANE_KEYMAX_2_B                      0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
+#define _PLANE_BUF_CFG_1_B                     0x7127c
+#define _PLANE_BUF_CFG_2_B                     0x7137c
+#define _PLANE_BUF_CFG_1(pipe) \
+       _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe) \
+       _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane)     \
+       _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A                         0x7017c
+#define _CUR_BUF_CFG_B                         0x7117c
+#define CUR_BUF_CFG(pipe)      _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 # define VGA_DISP_DISABLE                      (1 << 31)
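The new SKL universal-plane block above relies on the file's usual parameterised addressing: each pipe's register instances sit at a fixed stride, so one macro interpolates an address from the pipe-A and pipe-B offsets. A minimal standalone sketch of that arithmetic, assuming _PIPE()/_PLANE() keep their usual ((a) + (pipe) * ((b) - (a))) form and that the _PLANE_CTL_*_A offsets are 0x70180/0x70280 as in the A-pipe block:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative copies; the real helpers live earlier in i915_reg.h. */
    #define _PIPE(pipe, a, b)   ((a) + (pipe) * ((b) - (a)))
    #define _PLANE(plane, a, b) _PIPE(plane, a, b)

    #define _PLANE_CTL_1_A      0x70180 /* assumed */
    #define _PLANE_CTL_1_B      0x71180
    #define _PLANE_CTL_2_A      0x70280 /* assumed */
    #define _PLANE_CTL_2_B      0x71280
    #define _PLANE_CTL_1(pipe)  _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
    #define _PLANE_CTL_2(pipe)  _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
    #define PLANE_CTL(pipe, plane) \
            _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))

    int main(void)
    {
            /* pipe B, plane index 0: 0x70180 + 1 * (0x71180 - 0x70180) = 0x71180 */
            printf("PLANE_CTL(1, 0) = 0x%x\n", (uint32_t)PLANE_CTL(1, 0));
            /* pipe A, plane index 1: interpolates between the _1 and _2 instances */
            printf("PLANE_CTL(0, 1) = 0x%x\n", (uint32_t)PLANE_CTL(0, 1));
            return 0;
    }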
@@ -4625,6 +4919,18 @@ enum punit_power_well {
 #define PF_VSCALE(pipe)                _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
 #define PF_HSCALE(pipe)                _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
 
+#define _PSA_CTL               0x68180
+#define _PSB_CTL               0x68980
+#define PS_ENABLE              (1<<31)
+#define _PSA_WIN_SZ            0x68174
+#define _PSB_WIN_SZ            0x68974
+#define _PSA_WIN_POS           0x68170
+#define _PSB_WIN_POS           0x68970
+
+#define PS_CTL(pipe)           _PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe)                _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe)       _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+
 /* legacy palette */
 #define _LGC_PALETTE_A           0x4a000
 #define _LGC_PALETTE_B           0x4a800
@@ -4746,16 +5052,32 @@ enum punit_power_well {
 #define  GEN8_PIPE_SCAN_LINE_EVENT     (1 << 2)
 #define  GEN8_PIPE_VSYNC               (1 << 1)
 #define  GEN8_PIPE_VBLANK              (1 << 0)
+#define  GEN9_PIPE_CURSOR_FAULT                (1 << 11)
+#define  GEN9_PIPE_PLANE3_FAULT                (1 << 9)
+#define  GEN9_PIPE_PLANE2_FAULT                (1 << 8)
+#define  GEN9_PIPE_PLANE1_FAULT                (1 << 7)
+#define  GEN9_PIPE_PLANE3_FLIP_DONE    (1 << 5)
+#define  GEN9_PIPE_PLANE2_FLIP_DONE    (1 << 4)
+#define  GEN9_PIPE_PLANE1_FLIP_DONE    (1 << 3)
+#define  GEN9_PIPE_PLANE_FLIP_DONE(p)  (1 << (3 + (p)))
 #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
        (GEN8_PIPE_CURSOR_FAULT | \
         GEN8_PIPE_SPRITE_FAULT | \
         GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+       (GEN9_PIPE_CURSOR_FAULT | \
+        GEN9_PIPE_PLANE3_FAULT | \
+        GEN9_PIPE_PLANE2_FAULT | \
+        GEN9_PIPE_PLANE1_FAULT)
 
 #define GEN8_DE_PORT_ISR 0x44440
 #define GEN8_DE_PORT_IMR 0x44444
 #define GEN8_DE_PORT_IIR 0x44448
 #define GEN8_DE_PORT_IER 0x4444c
 #define  GEN8_PORT_DP_A_HOTPLUG                (1 << 3)
+#define  GEN9_AUX_CHANNEL_D            (1 << 27)
+#define  GEN9_AUX_CHANNEL_C            (1 << 26)
+#define  GEN9_AUX_CHANNEL_B            (1 << 25)
 #define  GEN8_AUX_CHANNEL_A            (1 << 0)
 
 #define GEN8_DE_MISC_ISR 0x44460
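The GEN9 fault bits mirror the GEN8 scheme, just spread over the three universal planes, and GEN9_PIPE_PLANE_FLIP_DONE(p) folds the per-plane flip-done bits into one expression (bits 3, 4, 5 for plane indices 0-2). A hedged sketch of how the combined fault mask would typically be consumed in the display IRQ handler, following the existing GEN8 pattern (variable names here are illustrative):

    /* pipe_iir holds this pipe's IIR value; pick the mask for the hw gen. */
    u32 fault_errors = pipe_iir & (INTEL_INFO(dev)->gen >= 9 ?
                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS :
                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
    if (fault_errors)
            DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
                      pipe_name(pipe), fault_errors);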
@@ -4839,6 +5161,8 @@ enum punit_power_well {
 /* GEN8 chicken */
 #define HDC_CHICKEN0                           0x7300
 #define  HDC_FORCE_NON_COHERENT                        (1<<4)
+#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED       (1<<11)
+#define  HDC_FENCE_DEST_SLM_DISABLE            (1<<14)
 
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
@@ -5540,6 +5864,12 @@ enum punit_power_well {
 #define   VLV_GTLC_PW_MEDIA_STATUS_MASK                (1 << 5)
 #define   VLV_GTLC_PW_RENDER_STATUS_MASK       (1 << 7)
 #define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define  FORCEWAKE_MEDIA_GEN9                  0xa270
+#define  FORCEWAKE_RENDER_GEN9                 0xa278
+#define  FORCEWAKE_BLITTER_GEN9                        0xa188
+#define  FORCEWAKE_ACK_MEDIA_GEN9              0x0D88
+#define  FORCEWAKE_ACK_RENDER_GEN9             0x0D84
+#define  FORCEWAKE_ACK_BLITTER_GEN9            0x130044
 #define   FORCEWAKE_KERNEL                     0x1
 #define   FORCEWAKE_USER                       0x2
 #define  FORCEWAKE_MT_ACK                      0x130040
@@ -5711,9 +6041,17 @@ enum punit_power_well {
 #define   GEN6_ENCODE_RC6_VID(mv)              (((mv) - 245) / 5)
 #define   GEN6_DECODE_RC6_VID(vids)            (((vids) * 5) + 245)
 #define   DISPLAY_IPS_CONTROL                  0x19
+#define          HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL  0x1A
 #define GEN6_PCODE_DATA                                0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT     16
+#define GEN6_PCODE_DATA1                       0x13812C
+
+#define   GEN9_PCODE_READ_MEM_LATENCY          0x6
+#define   GEN9_MEM_LATENCY_LEVEL_MASK          0xFF
+#define   GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT     8
+#define   GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT     16
+#define   GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT     24
 
 #define GEN6_GT_CORE_STATUS            0x138060
 #define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
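GEN9 reports display watermark memory latencies through the pcode mailbox: one query returns levels 0-3 packed as four 8-bit fields in the reply word, and a second query returns levels 4-7 with the identical layout (hence the shared _1_5/_2_6/_3_7 shift names). A sketch of the unpacking, assuming the reply from whatever pcode read helper the driver uses has landed in val:

    /* val: pcode reply to GEN9_PCODE_READ_MEM_LATENCY (levels 0-3 shown;
     * levels 4-7 arrive in a second query with the same field layout). */
    u8 wm[4];

    wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
    wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
            GEN9_MEM_LATENCY_LEVEL_MASK;
    wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
            GEN9_MEM_LATENCY_LEVEL_MASK;
    wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
            GEN9_MEM_LATENCY_LEVEL_MASK;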
@@ -5751,6 +6089,9 @@ enum punit_power_well {
 #define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1<<10)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
+#define GEN9_HALF_SLICE_CHICKEN5       0xe188
+#define   GEN9_DG_MIRROR_FIX_ENABLE    (1<<5)
+
 #define GEN8_ROW_CHICKEN               0xe4f0
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1<<8)
 #define   STALL_DOP_GATING_DISABLE             (1<<5)
@@ -5766,57 +6107,58 @@ enum punit_power_well {
 #define   GEN8_CENTROID_PIXEL_OPT_DIS  (1<<8)
 #define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1<<1)
 
+/* Audio */
 #define G4X_AUD_VID_DID                        (dev_priv->info.display_mmio_offset + 0x62020)
-#define INTEL_AUDIO_DEVCL              0x808629FB
-#define INTEL_AUDIO_DEVBLC             0x80862801
-#define INTEL_AUDIO_DEVCTG             0x80862802
+#define   INTEL_AUDIO_DEVCL            0x808629FB
+#define   INTEL_AUDIO_DEVBLC           0x80862801
+#define   INTEL_AUDIO_DEVCTG           0x80862802
 
 #define G4X_AUD_CNTL_ST                        0x620B4
-#define G4X_ELDV_DEVCL_DEVBLC          (1 << 13)
-#define G4X_ELDV_DEVCTG                        (1 << 14)
-#define G4X_ELD_ADDR                   (0xf << 5)
-#define G4X_ELD_ACK                    (1 << 4)
+#define   G4X_ELDV_DEVCL_DEVBLC                (1 << 13)
+#define   G4X_ELDV_DEVCTG              (1 << 14)
+#define   G4X_ELD_ADDR_MASK            (0xf << 5)
+#define   G4X_ELD_ACK                  (1 << 4)
 #define G4X_HDMIW_HDMIEDID             0x6210C
 
-#define IBX_HDMIW_HDMIEDID_A           0xE2050
-#define IBX_HDMIW_HDMIEDID_B           0xE2150
+#define _IBX_HDMIW_HDMIEDID_A          0xE2050
+#define _IBX_HDMIW_HDMIEDID_B          0xE2150
 #define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-                                       IBX_HDMIW_HDMIEDID_A, \
-                                       IBX_HDMIW_HDMIEDID_B)
-#define IBX_AUD_CNTL_ST_A              0xE20B4
-#define IBX_AUD_CNTL_ST_B              0xE21B4
+                                       _IBX_HDMIW_HDMIEDID_A, \
+                                       _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A             0xE20B4
+#define _IBX_AUD_CNTL_ST_B             0xE21B4
 #define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-                                       IBX_AUD_CNTL_ST_A, \
-                                       IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE            (0x1f << 10)
-#define IBX_ELD_ADDRESS                        (0x1f << 5)
-#define IBX_ELD_ACK                    (1 << 4)
+                                       _IBX_AUD_CNTL_ST_A, \
+                                       _IBX_AUD_CNTL_ST_B)
+#define   IBX_ELD_BUFFER_SIZE_MASK     (0x1f << 10)
+#define   IBX_ELD_ADDRESS_MASK         (0x1f << 5)
+#define   IBX_ELD_ACK                  (1 << 4)
 #define IBX_AUD_CNTL_ST2               0xE20C0
-#define IBX_ELD_VALIDB                 (1 << 0)
-#define IBX_CP_READYB                  (1 << 1)
+#define   IBX_CP_READY(port)           ((1 << 1) << (((port) - 1) * 4))
+#define   IBX_ELD_VALID(port)          ((1 << 0) << (((port) - 1) * 4))
 
-#define CPT_HDMIW_HDMIEDID_A           0xE5050
-#define CPT_HDMIW_HDMIEDID_B           0xE5150
+#define _CPT_HDMIW_HDMIEDID_A          0xE5050
+#define _CPT_HDMIW_HDMIEDID_B          0xE5150
 #define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-                                       CPT_HDMIW_HDMIEDID_A, \
-                                       CPT_HDMIW_HDMIEDID_B)
-#define CPT_AUD_CNTL_ST_A              0xE50B4
-#define CPT_AUD_CNTL_ST_B              0xE51B4
+                                       _CPT_HDMIW_HDMIEDID_A, \
+                                       _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A             0xE50B4
+#define _CPT_AUD_CNTL_ST_B             0xE51B4
 #define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-                                       CPT_AUD_CNTL_ST_A, \
-                                       CPT_AUD_CNTL_ST_B)
+                                       _CPT_AUD_CNTL_ST_A, \
+                                       _CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2              0xE50C0
 
-#define VLV_HDMIW_HDMIEDID_A           (VLV_DISPLAY_BASE + 0x62050)
-#define VLV_HDMIW_HDMIEDID_B           (VLV_DISPLAY_BASE + 0x62150)
+#define _VLV_HDMIW_HDMIEDID_A          (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B          (VLV_DISPLAY_BASE + 0x62150)
 #define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-                                       VLV_HDMIW_HDMIEDID_A, \
-                                       VLV_HDMIW_HDMIEDID_B)
-#define VLV_AUD_CNTL_ST_A              (VLV_DISPLAY_BASE + 0x620B4)
-#define VLV_AUD_CNTL_ST_B              (VLV_DISPLAY_BASE + 0x621B4)
+                                       _VLV_HDMIW_HDMIEDID_A, \
+                                       _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A             (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B             (VLV_DISPLAY_BASE + 0x621B4)
 #define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-                                       VLV_AUD_CNTL_ST_A, \
-                                       VLV_AUD_CNTL_ST_B)
+                                       _VLV_AUD_CNTL_ST_A, \
+                                       _VLV_AUD_CNTL_ST_B)
 #define VLV_AUD_CNTL_ST2               (VLV_DISPLAY_BASE + 0x620C0)
 
 /* These are the 4 32-bit write offset registers for each stream
@@ -5825,28 +6167,28 @@ enum punit_power_well {
  */
 #define GEN7_SO_WRITE_OFFSET(n)                (0x5280 + (n) * 4)
 
-#define IBX_AUD_CONFIG_A                       0xe2000
-#define IBX_AUD_CONFIG_B                       0xe2100
+#define _IBX_AUD_CONFIG_A              0xe2000
+#define _IBX_AUD_CONFIG_B              0xe2100
 #define IBX_AUD_CFG(pipe) _PIPE(pipe, \
-                                       IBX_AUD_CONFIG_A, \
-                                       IBX_AUD_CONFIG_B)
-#define CPT_AUD_CONFIG_A                       0xe5000
-#define CPT_AUD_CONFIG_B                       0xe5100
+                                       _IBX_AUD_CONFIG_A, \
+                                       _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A              0xe5000
+#define _CPT_AUD_CONFIG_B              0xe5100
 #define CPT_AUD_CFG(pipe) _PIPE(pipe, \
-                                       CPT_AUD_CONFIG_A, \
-                                       CPT_AUD_CONFIG_B)
-#define VLV_AUD_CONFIG_A               (VLV_DISPLAY_BASE + 0x62000)
-#define VLV_AUD_CONFIG_B               (VLV_DISPLAY_BASE + 0x62100)
+                                       _CPT_AUD_CONFIG_A, \
+                                       _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A              (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B              (VLV_DISPLAY_BASE + 0x62100)
 #define VLV_AUD_CFG(pipe) _PIPE(pipe, \
-                                       VLV_AUD_CONFIG_A, \
-                                       VLV_AUD_CONFIG_B)
+                                       _VLV_AUD_CONFIG_A, \
+                                       _VLV_AUD_CONFIG_B)
 
 #define   AUD_CONFIG_N_VALUE_INDEX             (1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE             (1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT             20
-#define   AUD_CONFIG_UPPER_N_VALUE             (0xff << 20)
+#define   AUD_CONFIG_UPPER_N_MASK              (0xff << 20)
 #define   AUD_CONFIG_LOWER_N_SHIFT             4
-#define   AUD_CONFIG_LOWER_N_VALUE             (0xfff << 4)
+#define   AUD_CONFIG_LOWER_N_MASK              (0xfff << 4)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT    16
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK     (0xf << 16)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175    (0 << 16)
@@ -5862,52 +6204,44 @@ enum punit_power_well {
 #define   AUD_CONFIG_DISABLE_NCTS              (1 << 3)
 
 /* HSW Audio */
-#define   HSW_AUD_CONFIG_A             0x65000 /* Audio Configuration Transcoder A */
-#define   HSW_AUD_CONFIG_B             0x65100 /* Audio Configuration Transcoder B */
-#define   HSW_AUD_CFG(pipe) _PIPE(pipe, \
-                                       HSW_AUD_CONFIG_A, \
-                                       HSW_AUD_CONFIG_B)
-
-#define   HSW_AUD_MISC_CTRL_A          0x65010 /* Audio Misc Control Convert 1 */
-#define   HSW_AUD_MISC_CTRL_B          0x65110 /* Audio Misc Control Convert 2 */
-#define   HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
-                                       HSW_AUD_MISC_CTRL_A, \
-                                       HSW_AUD_MISC_CTRL_B)
-
-#define   HSW_AUD_DIP_ELD_CTRL_ST_A    0x650b4 /* Audio DIP and ELD Control State Transcoder A */
-#define   HSW_AUD_DIP_ELD_CTRL_ST_B    0x651b4 /* Audio DIP and ELD Control State Transcoder B */
-#define   HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
-                                       HSW_AUD_DIP_ELD_CTRL_ST_A, \
-                                       HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define _HSW_AUD_CONFIG_A              0x65000
+#define _HSW_AUD_CONFIG_B              0x65100
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+                                       _HSW_AUD_CONFIG_A, \
+                                       _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A           0x65010
+#define _HSW_AUD_MISC_CTRL_B           0x65110
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+                                       _HSW_AUD_MISC_CTRL_A, \
+                                       _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A     0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B     0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+                                       _HSW_AUD_DIP_ELD_CTRL_ST_A, \
+                                       _HSW_AUD_DIP_ELD_CTRL_ST_B)
 
 /* Audio Digital Converter */
-#define   HSW_AUD_DIG_CNVT_1           0x65080 /* Audio Converter 1 */
-#define   HSW_AUD_DIG_CNVT_2           0x65180 /* Audio Converter 1 */
-#define   AUD_DIG_CNVT(pipe) _PIPE(pipe, \
-                                       HSW_AUD_DIG_CNVT_1, \
-                                       HSW_AUD_DIG_CNVT_2)
-#define   DIP_PORT_SEL_MASK            0x3
-
-#define   HSW_AUD_EDID_DATA_A          0x65050
-#define   HSW_AUD_EDID_DATA_B          0x65150
-#define   HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
-                                       HSW_AUD_EDID_DATA_A, \
-                                       HSW_AUD_EDID_DATA_B)
-
-#define   HSW_AUD_PIPE_CONV_CFG                0x6507c /* Audio pipe and converter configs */
-#define   HSW_AUD_PIN_ELD_CP_VLD       0x650c0 /* Audio ELD and CP Ready Status */
-#define   AUDIO_INACTIVE_C             (1<<11)
-#define   AUDIO_INACTIVE_B             (1<<7)
-#define   AUDIO_INACTIVE_A             (1<<3)
-#define   AUDIO_OUTPUT_ENABLE_A                (1<<2)
-#define   AUDIO_OUTPUT_ENABLE_B                (1<<6)
-#define   AUDIO_OUTPUT_ENABLE_C                (1<<10)
-#define   AUDIO_ELD_VALID_A            (1<<0)
-#define   AUDIO_ELD_VALID_B            (1<<4)
-#define   AUDIO_ELD_VALID_C            (1<<8)
-#define   AUDIO_CP_READY_A             (1<<1)
-#define   AUDIO_CP_READY_B             (1<<5)
-#define   AUDIO_CP_READY_C             (1<<9)
+#define _HSW_AUD_DIG_CNVT_1            0x65080
+#define _HSW_AUD_DIG_CNVT_2            0x65180
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+                                       _HSW_AUD_DIG_CNVT_1, \
+                                       _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK              0x3
+
+#define _HSW_AUD_EDID_DATA_A           0x65050
+#define _HSW_AUD_EDID_DATA_B           0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+                                       _HSW_AUD_EDID_DATA_A, \
+                                       _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG          0x6507c
+#define HSW_AUD_PIN_ELD_CP_VLD         0x650c0
+#define   AUDIO_INACTIVE(trans)                ((1 << 3) << ((trans) * 4))
+#define   AUDIO_OUTPUT_ENABLE(trans)   ((1 << 2) << ((trans) * 4))
+#define   AUDIO_CP_READY(trans)                ((1 << 1) << ((trans) * 4))
+#define   AUDIO_ELD_VALID(trans)       ((1 << 0) << ((trans) * 4))
 
 /* HSW Power Wells */
 #define HSW_PWR_WELL_BIOS                      0x45400 /* CTL1 */
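The parameterised audio status macros above replace three hand-written per-transcoder bits with one 4-bits-per-instance formula, the same packing the IBX_ELD_VALID(port)/IBX_CP_READY(port) macros use per port. A quick compile-time sanity sketch showing the new forms reproduce the removed fixed bits:

    static inline void audio_bit_sanity(void)
    {
            /* Matches the removed AUDIO_ELD_VALID_A/B/C values. */
            BUILD_BUG_ON(AUDIO_ELD_VALID(0) != (1 << 0));  /* transcoder A */
            BUILD_BUG_ON(AUDIO_ELD_VALID(1) != (1 << 4));  /* transcoder B */
            BUILD_BUG_ON(AUDIO_ELD_VALID(2) != (1 << 8));  /* transcoder C */
    }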
@@ -6125,6 +6459,83 @@ enum punit_power_well {
 #define  LCPLL_CD_SOURCE_FCLK          (1<<21)
 #define  LCPLL_CD_SOURCE_FCLK_DONE     (1<<19)
 
+/*
+ * SKL Clocks
+ */
+
+/* CDCLK_CTL */
+#define CDCLK_CTL                      0x46000
+#define  CDCLK_FREQ_SEL_MASK           (3<<26)
+#define  CDCLK_FREQ_450_432            (0<<26)
+#define  CDCLK_FREQ_540                        (1<<26)
+#define  CDCLK_FREQ_337_308            (2<<26)
+#define  CDCLK_FREQ_675_617            (3<<26)
+#define  CDCLK_FREQ_DECIMAL_MASK       (0x7ff)
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL             0x46010
+#define LCPLL2_CTL             0x46014
+#define  LCPLL_PLL_ENABLE      (1<<31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1             0x6C058
+#define  DPLL_CTRL1_HDMI_MODE(id)              (1<<((id)*6+5))
+#define  DPLL_CTRL1_SSC(id)                    (1<<((id)*6+4))
+#define  DPLL_CRTL1_LINK_RATE_MASK(id)         (7<<((id)*6+1))
+#define  DPLL_CRTL1_LINK_RATE_SHIFT(id)                ((id)*6+1)
+#define  DPLL_CRTL1_LINK_RATE(linkrate, id)    ((linkrate)<<((id)*6+1))
+#define  DPLL_CTRL1_OVERRIDE(id)               (1<<((id)*6))
+#define  DPLL_CRTL1_LINK_RATE_2700             0
+#define  DPLL_CRTL1_LINK_RATE_1350             1
+#define  DPLL_CRTL1_LINK_RATE_810              2
+#define  DPLL_CRTL1_LINK_RATE_1620             3
+#define  DPLL_CRTL1_LINK_RATE_1080             4
+#define  DPLL_CRTL1_LINK_RATE_2160             5
+
+/* DPLL control2 */
+#define DPLL_CTRL2                             0x6C05C
+#define  DPLL_CTRL2_DDI_CLK_OFF(port)          (1<<((port)+15))
+#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)     (3<<((port)*3+1))
+#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port)*3+1)
+#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)     ((clk)<<((port)*3+1))
+#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1<<((port)*3))
+
+/* DPLL Status */
+#define DPLL_STATUS    0x6C060
+#define  DPLL_LOCK(id) (1<<((id)*8))
+
+/* DPLL cfg */
+#define DPLL1_CFGCR1   0x6C040
+#define DPLL2_CFGCR1   0x6C048
+#define DPLL3_CFGCR1   0x6C050
+#define  DPLL_CFGCR1_FREQ_ENABLE       (1<<31)
+#define  DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
+#define  DPLL_CFGCR1_DCO_FRACTION(x)   ((x)<<9)
+#define  DPLL_CFGCR1_DCO_INTEGER_MASK  (0x1ff)
+
+#define DPLL1_CFGCR2   0x6C044
+#define DPLL2_CFGCR2   0x6C04C
+#define DPLL3_CFGCR2   0x6C054
+#define  DPLL_CFGCR2_QDIV_RATIO_MASK   (0xff<<8)
+#define  DPLL_CFGCR2_QDIV_RATIO(x)     ((x)<<8)
+#define  DPLL_CFGCR2_QDIV_MODE(x)      ((x)<<7)
+#define  DPLL_CFGCR2_KDIV_MASK         (3<<5)
+#define  DPLL_CFGCR2_KDIV(x)           ((x)<<5)
+#define  DPLL_CFGCR2_KDIV_5 (0<<5)
+#define  DPLL_CFGCR2_KDIV_2 (1<<5)
+#define  DPLL_CFGCR2_KDIV_3 (2<<5)
+#define  DPLL_CFGCR2_KDIV_1 (3<<5)
+#define  DPLL_CFGCR2_PDIV_MASK         (7<<2)
+#define  DPLL_CFGCR2_PDIV(x)           ((x)<<2)
+#define  DPLL_CFGCR2_PDIV_1 (0<<2)
+#define  DPLL_CFGCR2_PDIV_2 (1<<2)
+#define  DPLL_CFGCR2_PDIV_3 (2<<2)
+#define  DPLL_CFGCR2_PDIV_7 (4<<2)
+#define  DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
+#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
+
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
 #define D_COMP_HSW                     (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
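The SKL shared-DPLL registers above are again pure stride arithmetic: the CFGCR banks are 8 bytes apart, so GET_CFG_CR1_REG(SKL_DPLL1 + 1) yields 0x6C040 + 8 = 0x6C048 (DPLL2_CFGCR1), and DPLL_CTRL1 packs one 6-bit field per DPLL id. A hedged sketch of selecting a link rate for DPLL 1 using only the macros defined above (the SKL_DPLL* enums live elsewhere in the driver; this is illustrative, not the driver's actual sequence):

    u32 val = I915_READ(DPLL_CTRL1);

    /* Clear DPLL 1's field, then request the 1350 MHz rate explicitly. */
    val &= ~(DPLL_CRTL1_LINK_RATE_MASK(1) | DPLL_CTRL1_HDMI_MODE(1) |
             DPLL_CTRL1_SSC(1));
    val |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 1) |
           DPLL_CTRL1_OVERRIDE(1);
    I915_WRITE(DPLL_CTRL1, val);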
index 043123c77a1f4e0f3d92143f3804eb614c94fd24..dfe661743398b1c90e0f9dc4b25ff6e9d551985c 100644
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
                i915_save_display_reg(dev);
 
        /* LVDS state */
-       if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-               if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-                       dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
-       } else if (IS_VALLEYVIEW(dev)) {
-               dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
-               dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-
-               dev_priv->regfile.saveBLC_HIST_CTL =
-                       I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
-               dev_priv->regfile.saveBLC_HIST_CTL_B =
-                       I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
-       } else {
-               dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
-               dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-               dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-               if (IS_MOBILE(dev) && !IS_I830(dev))
-                       dev_priv->regfile.saveLVDS = I915_READ(LVDS);
-       }
-
-       if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-               dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+       else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+               dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 
+       /* Panel power sequencer */
        if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
                dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-       } else {
+       } else if (!IS_VALLEYVIEW(dev)) {
+               dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
                dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                mask = ~LVDS_PORT_EN;
 
+       /* LVDS state */
        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
                I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
        else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
                I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
 
-       if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-               I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-
+       /* Panel power sequencer */
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
                I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-               I915_WRITE(RSTDBYCTL,
-                          dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
-       } else if (IS_VALLEYVIEW(dev)) {
-               I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
-                          dev_priv->regfile.saveBLC_HIST_CTL);
-               I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
-                          dev_priv->regfile.saveBLC_HIST_CTL);
-       } else {
-               I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-               I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+       } else if (!IS_VALLEYVIEW(dev)) {
                I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -368,6 +343,8 @@ int i915_restore_state(struct drm_device *dev)
                        I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
                        I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
                        I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+                       I915_WRITE(RSTDBYCTL,
+                                  dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
                } else {
                        I915_WRITE(IER, dev_priv->regfile.saveIER);
                        I915_WRITE(IMR, dev_priv->regfile.saveIMR);
index 503847f18fdd5a1252403ca44d85de7daab6fa82..4a5af695307eeaf6f9531ea658bbe3afadb3aa88 100644
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
 static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
-       &dev_attr_rc6p_residency_ms.attr,
-       &dev_attr_rc6pp_residency_ms.attr,
        NULL
 };
 
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs =  rc6_attrs
 };
+
+static struct attribute *rc6p_attrs[] = {
+       &dev_attr_rc6p_residency_ms.attr,
+       &dev_attr_rc6pp_residency_ms.attr,
+       NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+       .name = power_group_name,
+       .attrs =  rc6p_attrs
+};
 #endif
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
        int ret;
 
 #ifdef CONFIG_PM
-       if (INTEL_INFO(dev)->gen >= 6) {
+       if (HAS_RC6(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
+       if (HAS_RC6p(dev)) {
+               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+                                       &rc6p_attr_group);
+               if (ret)
+                       DRM_ERROR("RC6p residency sysfs setup failed\n");
+       }
 #endif
        if (HAS_L3_DPF(dev)) {
                ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
        device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
 #endif
 }
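The upshot of the split is that rc6p_residency_ms and rc6pp_residency_ms are only merged into the power group on platforms where HAS_RC6p() is true, instead of unconditionally on gen6+; on hardware without deep RC6 states the files simply never appear. From userspace (first DRM card assumed) that looks like:

    cat /sys/class/drm/card0/power/rc6_residency_ms    # present wherever RC6 is
    cat /sys/class/drm/card0/power/rc6p_residency_ms   # absent without RC6p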
index f5aa0067755a04aa0bb52d58f3dcabc3e3aa1f94..751d4ad14d6224028c93b1bdcdeec1f2d1b46a16 100644
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
            TP_printk("new_freq=%u", __entry->freq)
 );
 
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled, each process using drm allocates at least one
+ * translation table. These traces make it possible to track the allocation
+ * and lifetime of the tables, which is useful during testing/debug to
+ * verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also printed
+ * by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
+DECLARE_EVENT_CLASS(i915_ppgtt,
+       TP_PROTO(struct i915_address_space *vm),
+       TP_ARGS(vm),
+
+       TP_STRUCT__entry(
+                       __field(struct i915_address_space *, vm)
+                       __field(u32, dev)
+       ),
+
+       TP_fast_assign(
+                       __entry->vm = vm;
+                       __entry->dev = vm->dev->primary->index;
+       ),
+
+       TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+       TP_PROTO(struct i915_address_space *vm),
+       TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+       TP_PROTO(struct i915_address_space *vm),
+       TP_ARGS(vm)
+);
+
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
+DECLARE_EVENT_CLASS(i915_context,
+       TP_PROTO(struct intel_context *ctx),
+       TP_ARGS(ctx),
+
+       TP_STRUCT__entry(
+                       __field(u32, dev)
+                       __field(struct intel_context *, ctx)
+                       __field(struct i915_address_space *, vm)
+       ),
+
+       TP_fast_assign(
+                       __entry->ctx = ctx;
+                       __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+                       __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+       ),
+
+       TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+                 __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+       TP_PROTO(struct intel_context *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+       TP_PROTO(struct intel_context *ctx),
+       TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint tracks the mm switch, an important point in the lifetime
+ * of the vm in the legacy submission path. It is emitted only if full ppgtt
+ * is enabled.
+ */
+TRACE_EVENT(switch_mm,
+       TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+       TP_ARGS(ring, to),
+
+       TP_STRUCT__entry(
+                       __field(u32, ring)
+                       __field(struct intel_context *, to)
+                       __field(struct i915_address_space *, vm)
+                       __field(u32, dev)
+       ),
+
+       TP_fast_assign(
+                       __entry->ring = ring->id;
+                       __entry->to = to;
+                       __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+                       __entry->dev = ring->dev->primary->index;
+       ),
+
+       TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+                 __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
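Once the driver is loaded these events show up under the usual tracefs hierarchy, so a leak hunt during a test run looks like this (standard tracefs mount paths assumed):

    echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_create/enable
    echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_release/enable
    cat /sys/kernel/debug/tracing/trace_pipe

Each record carries the dev=/vm= fields from TP_printk(), which can be matched against i915_vma_bind/i915_vma_unbind and the context events to pair every create with a release.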
index 480da593e6c036f68c20a827c7f499377dccaf27..d10fe3e9c49f69d8712d81e812e43dad68a3c273 100644
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
        }
        /* FIXME: regfile.save TV & SDVO state */
 
+       /* Panel fitter */
+       if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+               dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+               dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+       }
+
        /* Backlight */
        if (INTEL_INFO(dev)->gen <= 4)
                pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
                dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
                if (INTEL_INFO(dev)->gen >= 4)
                        dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+               dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
        }
 
        return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
                if (INTEL_INFO(dev)->gen >= 4)
                        I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
                I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+               I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+       }
+
+       /* Panel fitter */
+       if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+               I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
        }
 
        /* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 0000000..2c7ed5c
--- /dev/null
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and DisplayPort
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after link training has completed.
+ *
+ * The codec and controller sequences could be done either in parallel or serially,
+ * but generally the ELDV/PD change in the codec sequence indicates to the audio
+ * driver that the controller sequence should start. Indeed, most of the
+ * co-operation between the graphics and audio drivers is handled via audio
+ * related registers. (The notable exception is the power management, not
+ * covered here.)
+ */
+
+static const struct {
+       int clock;
+       u32 config;
+} hdmi_audio_clock[] = {
+       { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+       { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+       { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+       { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+       { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+       { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+       { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+       { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+       { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+       { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+               if (mode->clock == hdmi_audio_clock[i].clock)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+               i = 1;
+       }
+
+       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+                     hdmi_audio_clock[i].clock,
+                     hdmi_audio_clock[i].config);
+
+       return hdmi_audio_clock[i].config;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+                              int reg_eldv, uint32_t bits_eldv,
+                              int reg_elda, uint32_t bits_elda,
+                              int reg_edid)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       uint8_t *eld = connector->eld;
+       uint32_t tmp;
+       int i;
+
+       tmp = I915_READ(reg_eldv);
+       tmp &= bits_eldv;
+
+       if (!tmp)
+               return false;
+
+       tmp = I915_READ(reg_elda);
+       tmp &= ~bits_elda;
+       I915_WRITE(reg_elda, tmp);
+
+       for (i = 0; i < drm_eld_size(eld) / 4; i++)
+               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+                       return false;
+
+       return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       uint32_t eldv, tmp;
+
+       DRM_DEBUG_KMS("Disable audio codec\n");
+
+       tmp = I915_READ(G4X_AUD_VID_DID);
+       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+               eldv = G4X_ELDV_DEVCL_DEVBLC;
+       else
+               eldv = G4X_ELDV_DEVCTG;
+
+       /* Invalidate ELD */
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp &= ~eldv;
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+                                  struct intel_encoder *encoder,
+                                  struct drm_display_mode *mode)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       uint8_t *eld = connector->eld;
+       uint32_t eldv;
+       uint32_t tmp;
+       int len, i;
+
+       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+       tmp = I915_READ(G4X_AUD_VID_DID);
+       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+               eldv = G4X_ELDV_DEVCL_DEVBLC;
+       else
+               eldv = G4X_ELDV_DEVCTG;
+
+       if (intel_eld_uptodate(connector,
+                              G4X_AUD_CNTL_ST, eldv,
+                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+                              G4X_HDMIW_HDMIEDID))
+               return;
+
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+       len = (tmp >> 9) & 0x1f;                /* ELD buffer size */
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+       len = min(drm_eld_size(eld) / 4, len);
+       DRM_DEBUG_DRIVER("ELD size %d\n", len);
+       for (i = 0; i < len; i++)
+               I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp |= eldv;
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       uint32_t tmp;
+
+       DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+       /* Disable timestamps */
+       tmp = I915_READ(HSW_AUD_CFG(pipe));
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+       /* Invalidate ELD */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp &= ~AUDIO_ELD_VALID(pipe);
+       tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+                                  struct intel_encoder *encoder,
+                                  struct drm_display_mode *mode)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       const uint8_t *eld = connector->eld;
+       uint32_t tmp;
+       int len, i;
+
+       DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+                     pipe_name(pipe), drm_eld_size(eld));
+
+       /* Enable audio presence detect, invalidate ELD */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+       tmp &= ~AUDIO_ELD_VALID(pipe);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+       /*
+        * FIXME: We're supposed to wait for vblank here, but we have vblanks
+        * disabled during the mode set. The proper fix would be to push the
+        * rest of the setup into a vblank work item, queued here, but the
+        * infrastructure is not there yet.
+        */
+
+       /* Reset ELD write address */
+       tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+       tmp &= ~IBX_ELD_ADDRESS_MASK;
+       I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+       /* Up to 84 bytes of hw ELD buffer */
+       len = min(drm_eld_size(eld), 84);
+       for (i = 0; i < len / 4; i++)
+               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+       /* ELD valid */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp |= AUDIO_ELD_VALID(pipe);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+       /* Enable timestamps */
+       tmp = I915_READ(HSW_AUD_CFG(pipe));
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       else
+               tmp |= audio_config_hdmi_pixel_clock(mode);
+       I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_digital_port *intel_dig_port =
+               enc_to_dig_port(&encoder->base);
+       enum port port = intel_dig_port->port;
+       enum pipe pipe = intel_crtc->pipe;
+       uint32_t tmp, eldv;
+       int aud_config;
+       int aud_cntrl_st2;
+
+       DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+                     port_name(port), pipe_name(pipe));
+
+       if (HAS_PCH_IBX(dev_priv->dev)) {
+               aud_config = IBX_AUD_CFG(pipe);
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               aud_config = VLV_AUD_CFG(pipe);
+               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+       } else {
+               aud_config = CPT_AUD_CFG(pipe);
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+       }
+
+       /* Disable timestamps */
+       tmp = I915_READ(aud_config);
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       I915_WRITE(aud_config, tmp);
+
+       if (WARN_ON(!port)) {
+               eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+                       IBX_ELD_VALID(PORT_D);
+       } else {
+               eldv = IBX_ELD_VALID(port);
+       }
+
+       /* Invalidate ELD */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp &= ~eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+                                  struct intel_encoder *encoder,
+                                  struct drm_display_mode *mode)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_digital_port *intel_dig_port =
+               enc_to_dig_port(&encoder->base);
+       enum port port = intel_dig_port->port;
+       enum pipe pipe = intel_crtc->pipe;
+       uint8_t *eld = connector->eld;
+       uint32_t eldv;
+       uint32_t tmp;
+       int len, i;
+       int hdmiw_hdmiedid;
+       int aud_config;
+       int aud_cntl_st;
+       int aud_cntrl_st2;
+
+       DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+                     port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+       /*
+        * FIXME: We're supposed to wait for vblank here, but we have vblanks
+        * disabled during the mode set. The proper fix would be to push the
+        * rest of the setup into a vblank work item, queued here, but the
+        * infrastructure is not there yet.
+        */
+
+       if (HAS_PCH_IBX(connector->dev)) {
+               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+               aud_config = IBX_AUD_CFG(pipe);
+               aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+       } else if (IS_VALLEYVIEW(connector->dev)) {
+               hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+               aud_config = VLV_AUD_CFG(pipe);
+               aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+       } else {
+               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+               aud_config = CPT_AUD_CFG(pipe);
+               aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+       }
+
+       if (WARN_ON(!port)) {
+               eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+                       IBX_ELD_VALID(PORT_D);
+       } else {
+               eldv = IBX_ELD_VALID(port);
+       }
+
+       /* Invalidate ELD */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp &= ~eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+
+       /* Reset ELD write address */
+       tmp = I915_READ(aud_cntl_st);
+       tmp &= ~IBX_ELD_ADDRESS_MASK;
+       I915_WRITE(aud_cntl_st, tmp);
+
+       /* Up to 84 bytes of hw ELD buffer */
+       len = min(drm_eld_size(eld), 84);
+       for (i = 0; i < len / 4; i++)
+               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+       /* ELD valid */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp |= eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+
+       /* Enable timestamps */
+       tmp = I915_READ(aud_config);
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       else
+               tmp |= audio_config_hdmi_pixel_clock(mode);
+       I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after link training has completed.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+       struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+       struct drm_connector *connector;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       connector = drm_select_eld(encoder, mode);
+       if (!connector)
+               return;
+
+       DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+                        connector->base.id,
+                        connector->name,
+                        connector->encoder->base.id,
+                        connector->encoder->name);
+
+       /* ELD Conn_Type */
+       connector->eld[5] &= ~(3 << 2);
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+               connector->eld[5] |= (1 << 2);
+
+       connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+       if (dev_priv->display.audio_codec_enable)
+               dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->display.audio_codec_disable)
+               dev_priv->display.audio_codec_disable(encoder);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_G4X(dev)) {
+               dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+       } else if (IS_VALLEYVIEW(dev)) {
+               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+       } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+               dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+       }
+}
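A note on the hdmi_audio_clock[] table above: the DIV_ROUND_UP entries encode the NTSC-style 1000/1001 fractional pixel clocks. For example, DIV_ROUND_UP(25200 * 1000, 1001) = DIV_ROUND_UP(25200000, 1001) = 25175, which matches a 25.175 MHz mode->clock expressed in kHz. When no entry matches, audio_config_hdmi_pixel_clock() deliberately falls back to index 1, the 25.2 MHz entry marked as the default per bspec.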
index 905999bee2ac5ee7f7b550f1c3f4fa043f422a97..7603765c91fc0fd3f664675d49e885161a1f7eba 100644
@@ -46,7 +46,7 @@ struct bdb_header {
        u16 version;                    /**< decimal */
        u16 header_size;                /**< in bytes */
        u16 bdb_size;                   /**< in bytes */
-};
+} __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
 struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
        /* This one should also be safe to use anywhere, even without version
         * checks. */
        struct common_child_dev_config common;
-};
+} __packed;
 
 struct bdb_general_definitions {
        /* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
        u16 bl_disable_delay;
        u16 panel_off_delay;
        u16 panel_power_cycle_delay;
-};
+} __packed;
 
 struct bdb_mipi_config {
        struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
        struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
 
 /* Block 53 contains MIPI sequences as needed by the panel
  * for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
 struct bdb_mipi_sequence {
        u8 version;
        u8 data[0];
-};
+} __packed;
 
 /* MIPI Sequence Block definitions */
 enum mipi_seq {
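The __packed annotations are the substantive change in this file: these structs are overlaid directly onto VBT bytes read out of the BIOS ROM, and without the attribute the compiler may insert alignment padding after mixed u8/u16 members, shifting every later field off its on-disk offset. A generic illustration (hypothetical struct, not driver code):

    struct vbt_field_padded {           /* u8 then u16: one pad byte added */
            uint8_t  tag;
            uint16_t size;
    };                                  /* sizeof == 4 on common ABIs */

    struct vbt_field_packed {
            uint8_t  tag;
            uint16_t size;
    } __attribute__((packed));          /* sizeof == 3: matches the blob */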
index 9212e6504e0f1a2867ae7913d01cf019107d77b0..a9af9a4866db1310facee99e5ac5c2738cc45cc2 100644
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(crt->adpa_reg);
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
                I915_WRITE(crt->adpa_reg, adpa);
                POSTING_READ(crt->adpa_reg);
 
-               DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+               DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
                crt->force_hotplug_required = 1;
        }
 
index b63d4fa204a32b540529b9e49d72cb7108839190..e6b45cd150d387f3d41974e3714e0d73ff46d64c 100644
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
        { 0x00BEFFFF, 0x00140006 },
        { 0x80B2CFFF, 0x001B0002 },
        { 0x00FFFFFF, 0x000E000A },
-       { 0x00D75FFF, 0x00180004 },
-       { 0x80CB2FFF, 0x001B0002 },
+       { 0x00DB6FFF, 0x00160005 },
+       { 0x80C71FFF, 0x001A0002 },
        { 0x00F7DFFF, 0x00180004 },
        { 0x80D75FFF, 0x001B0002 },
 };
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
        { 0x80FFFFFF, 0x001B0002 },     /* 9:   1000    1000    0       */
 };
 
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+       { 0x00000018, 0x000000a0 },
+       { 0x00004014, 0x00000098 },
+       { 0x00006012, 0x00000088 },
+       { 0x00008010, 0x00000080 },
+       { 0x00000018, 0x00000098 },
+       { 0x00004014, 0x00000088 },
+       { 0x00006012, 0x00000080 },
+       { 0x00000018, 0x00000088 },
+       { 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+                                       /* Idx  NT mV   T mV    db  */
+       { 0x00000018, 0x000000a0 },     /* 0:   400     400     0   */
+       { 0x00004014, 0x00000098 },     /* 1:   400     600     3.5 */
+       { 0x00006012, 0x00000088 },     /* 2:   400     800     6   */
+       { 0x00000018, 0x0000003c },     /* 3:   450     450     0   */
+       { 0x00000018, 0x00000098 },     /* 4:   600     600     0   */
+       { 0x00003015, 0x00000088 },     /* 5:   600     800     2.5 */
+       { 0x00005013, 0x00000080 },     /* 6:   600     1000    4.5 */
+       { 0x00000018, 0x00000088 },     /* 7:   800     800     0   */
+       { 0x00000096, 0x00000080 },     /* 8:   800     1000    2   */
+       { 0x00000018, 0x00000080 },     /* 9:   1200    1200    0   */
+};
+
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
        const struct ddi_buf_trans *ddi_translations_hdmi;
        const struct ddi_buf_trans *ddi_translations;
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_SKYLAKE(dev)) {
+               ddi_translations_fdi = NULL;
+               ddi_translations_dp = skl_ddi_translations_dp;
+               ddi_translations_edp = skl_ddi_translations_dp;
+               ddi_translations_hdmi = skl_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+               hdmi_800mV_0dB = 7;
+       } else if (IS_BROADWELL(dev)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
                ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
                        ddi_translations = ddi_translations_dp;
                break;
        case PORT_E:
-               ddi_translations = ddi_translations_fdi;
+               if (ddi_translations_fdi)
+                       ddi_translations = ddi_translations_fdi;
+               else
+                       ddi_translations = ddi_translations_dp;
                break;
        default:
                BUG();
@@ -423,6 +459,27 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
        return ret;
 }
 
+static struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *intel_encoder, *ret = NULL;
+       int num_encoders = 0;
+
+       for_each_intel_encoder(dev, intel_encoder) {
+               if (intel_encoder->new_crtc == crtc) {
+                       ret = intel_encoder;
+                       num_encoders++;
+               }
+       }
+
+       WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+            pipe_name(crtc->pipe));
+
+       BUG_ON(ret == NULL);
+       return ret;
+}
+
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -613,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
        return (refclk * n * 100) / (p * r);
 }
 
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+                              uint32_t dpll)
+{
+       uint32_t cfgcr1_reg, cfgcr2_reg;
+       uint32_t cfgcr1_val, cfgcr2_val;
+       uint32_t p0, p1, p2, dco_freq;
+
+       cfgcr1_reg = GET_CFG_CR1_REG(dpll);
+       cfgcr2_reg = GET_CFG_CR2_REG(dpll);
+
+       cfgcr1_val = I915_READ(cfgcr1_reg);
+       cfgcr2_val = I915_READ(cfgcr2_reg);
+
+       p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+       p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+       if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+               p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+       else
+               p1 = 1;
+
+       switch (p0) {
+       case DPLL_CFGCR2_PDIV_1:
+               p0 = 1;
+               break;
+       case DPLL_CFGCR2_PDIV_2:
+               p0 = 2;
+               break;
+       case DPLL_CFGCR2_PDIV_3:
+               p0 = 3;
+               break;
+       case DPLL_CFGCR2_PDIV_7:
+               p0 = 7;
+               break;
+       }
+
+       switch (p2) {
+       case DPLL_CFGCR2_KDIV_5:
+               p2 = 5;
+               break;
+       case DPLL_CFGCR2_KDIV_2:
+               p2 = 2;
+               break;
+       case DPLL_CFGCR2_KDIV_3:
+               p2 = 3;
+               break;
+       case DPLL_CFGCR2_KDIV_1:
+               p2 = 1;
+               break;
+       }
+
+       dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+       dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+               1000) / 0x8000;
+
+       return dco_freq / (p0 * p1 * p2 * 5);
+}
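
The return value is the port clock in kHz: the DCO runs at 24 MHz times the integer part, plus fraction/0x8000 of another 24 MHz, and dividing by p0*p1*p2 and then by 5 converts the AFE rate back to the port rate. Plugging illustrative register values into the formula (a standalone check of the arithmetic, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed example config: DCO integer 337, extracted fraction
	 * 0x4000, p0=2, p1=1, p2=5 */
	uint32_t dco_integer = 337, dco_fraction = 0x4000;
	uint32_t p0 = 2, p1 = 1, p2 = 5;
	uint32_t dco_freq;

	dco_freq = dco_integer * 24 * 1000;		   /* 8088000 kHz */
	dco_freq += (dco_fraction * 24 * 1000) / 0x8000;   /* + 12000 kHz */

	/* 8100000 / (2*1*5*5) = 162000 kHz, i.e. a 1.62 GHz DP link */
	printf("link clock = %u kHz\n", dco_freq / (p0 * p1 * p2 * 5));
	return 0;
}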
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+                               struct intel_crtc_config *pipe_config)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       int link_clock = 0;
+       uint32_t dpll_ctl1, dpll;
+
+       dpll = pipe_config->ddi_pll_sel;
+
+       dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+       if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+               link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+       } else {
+               link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
+               link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+
+               switch (link_clock) {
+               case DPLL_CRTL1_LINK_RATE_810:
+                       link_clock = 81000;
+                       break;
+               case DPLL_CRTL1_LINK_RATE_1350:
+                       link_clock = 135000;
+                       break;
+               case DPLL_CRTL1_LINK_RATE_2700:
+                       link_clock = 270000;
+                       break;
+               default:
+                       WARN(1, "Unsupported link rate\n");
+                       break;
+               }
+               link_clock *= 2;
+       }
+
+       pipe_config->port_clock = link_clock;
+
+       if (pipe_config->has_dp_encoder)
+               pipe_config->adjusted_mode.crtc_clock =
+                       intel_dotclock_calculate(pipe_config->port_clock,
+                                                &pipe_config->dp_m_n);
+       else
+               pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
                              struct intel_crtc_config *pipe_config)
 {
@@ -756,7 +918,7 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
                      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
                      WRPLL_DIVIDER_POST(p);
 
-               intel_crtc->config.dpll_hw_state.wrpll = val;
+               intel_crtc->new_config->dpll_hw_state.wrpll = val;
 
                pll = intel_get_shared_dpll(intel_crtc);
                if (pll == NULL) {
@@ -765,12 +927,234 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
                        return false;
                }
 
-               intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+               intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
        }
 
        return true;
 }
 
+struct skl_wrpll_params {
+       uint32_t        dco_fraction;
+       uint32_t        dco_integer;
+       uint32_t        qdiv_ratio;
+       uint32_t        qdiv_mode;
+       uint32_t        kdiv;
+       uint32_t        pdiv;
+       uint32_t        central_freq;
+};
+
+static void
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+                       struct skl_wrpll_params *wrpll_params)
+{
+       uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+       uint64_t dco_central_freq[3] = {8400000000ULL,
+                                       9000000000ULL,
+                                       9600000000ULL};
+       uint32_t min_dco_deviation = 400;
+       uint32_t min_dco_index = 3;
+       uint32_t P0[4] = {1, 2, 3, 7};
+       uint32_t P2[4] = {1, 2, 3, 5};
+       bool found = false;
+       uint32_t candidate_p = 0;
+       uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
+       uint32_t candidate_p2[3] = {0};
+       uint32_t dco_central_freq_deviation[3];
+       uint32_t i, P1, k, dco_count;
+       bool retry_with_odd = false;
+       uint64_t dco_freq;
+
+       /* Determine P0, P1 and P2 */
+       for (dco_count = 0; dco_count < 3; dco_count++) {
+               found = false;
+               candidate_p =
+                       div64_u64(dco_central_freq[dco_count], afe_clock);
+               if (retry_with_odd == false)
+                       candidate_p = (candidate_p % 2 == 0 ?
+                               candidate_p : candidate_p + 1);
+
+               for (P1 = 1; P1 < candidate_p; P1++) {
+                       for (i = 0; i < 4; i++) {
+                               if (!(P0[i] != 1 || P1 == 1))
+                                       continue;
+
+                               for (k = 0; k < 4; k++) {
+                                       if (P1 != 1 && P2[k] != 2)
+                                               continue;
+
+                                       if (candidate_p == P0[i] * P1 * P2[k]) {
+                                               /* Found possible P0, P1, P2 */
+                                               found = true;
+                                               candidate_p0[dco_count] = P0[i];
+                                               candidate_p1[dco_count] = P1;
+                                               candidate_p2[dco_count] = P2[k];
+                                               goto found;
+                                       }
+
+                               }
+                       }
+               }
+
+found:
+               if (found) {
+                       dco_central_freq_deviation[dco_count] =
+                               div64_u64(10000 *
+                                         abs_diff((candidate_p * afe_clock),
+                                                  dco_central_freq[dco_count]),
+                                         dco_central_freq[dco_count]);
+
+                       if (dco_central_freq_deviation[dco_count] <
+                               min_dco_deviation) {
+                               min_dco_deviation =
+                                       dco_central_freq_deviation[dco_count];
+                               min_dco_index = dco_count;
+                       }
+               }
+
+               if (min_dco_index > 2 && dco_count == 2) {
+                       retry_with_odd = true;
+                       dco_count = 0;
+               }
+       }
+
+       if (min_dco_index > 2) {
+               WARN(1, "No valid values found for the given pixel clock\n");
+       } else {
+                wrpll_params->central_freq = dco_central_freq[min_dco_index];
+
+                switch (dco_central_freq[min_dco_index]) {
+                case 9600000000ULL:
+                       wrpll_params->central_freq = 0;
+                       break;
+                case 9000000000ULL:
+                       wrpll_params->central_freq = 1;
+                       break;
+                case 8400000000ULL:
+                       wrpll_params->central_freq = 3;
+                }
+
+                switch (candidate_p0[min_dco_index]) {
+                case 1:
+                       wrpll_params->pdiv = 0;
+                       break;
+                case 2:
+                       wrpll_params->pdiv = 1;
+                       break;
+                case 3:
+                       wrpll_params->pdiv = 2;
+                       break;
+                case 7:
+                       wrpll_params->pdiv = 4;
+                       break;
+                default:
+                       WARN(1, "Incorrect PDiv\n");
+                }
+
+                switch (candidate_p2[min_dco_index]) {
+                case 5:
+                       wrpll_params->kdiv = 0;
+                       break;
+                case 2:
+                       wrpll_params->kdiv = 1;
+                       break;
+                case 3:
+                       wrpll_params->kdiv = 2;
+                       break;
+                case 1:
+                       wrpll_params->kdiv = 3;
+                       break;
+                default:
+                       WARN(1, "Incorrect KDiv\n");
+                }
+
+                wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+                wrpll_params->qdiv_mode =
+                       (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
+
+                dco_freq = candidate_p0[min_dco_index] *
+                        candidate_p1[min_dco_index] *
+                        candidate_p2[min_dco_index] * afe_clock;
+
+               /*
+                * Intermediate values are in Hz.
+                * Divide by MHz to match bspec.
+                */
+                wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+                wrpll_params->dco_fraction =
+                        div_u64(((div_u64(dco_freq, 24) -
+                                  wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+
+       }
+}
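
In short, the search factorizes a total divider P = P0*P1*P2 such that P times the AFE clock lands as close as possible to one of the three legal DCO central frequencies, preferring even P and retrying with odd values only when nothing stays inside the 400-unit deviation budget (in 0.01% steps). A standalone sketch of the outer selection step (simplified: the real code also validates the P0/P1/P2 factorization and the deviation limit):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t afe_clock = 148500000ULL * 5;	/* 148.5 MHz pixel clock */
	uint64_t centrals[3] = { 8400000000ULL, 9000000000ULL,
				 9600000000ULL };
	uint64_t best_dev = UINT64_MAX, best_central = 0, best_p = 0;
	int i;

	for (i = 0; i < 3; i++) {
		uint64_t p = centrals[i] / afe_clock;
		uint64_t dev;

		if (p & 1)	/* prefer an even divider, as above */
			p++;

		dev = p * afe_clock > centrals[i] ?
		      p * afe_clock - centrals[i] : centrals[i] - p * afe_clock;
		if (dev < best_dev) {
			best_dev = dev;
			best_central = centrals[i];
			best_p = p;
		}
	}

	/* prints P=12 around the 9 GHz DCO for this pixel clock */
	printf("P=%" PRIu64 " around %" PRIu64 " Hz\n", best_p, best_central);
	return 0;
}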
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+                  struct intel_encoder *intel_encoder,
+                  int clock)
+{
+       struct intel_shared_dpll *pll;
+       uint32_t ctrl1, cfgcr1, cfgcr2;
+
+       /*
+        * See comment in intel_dpll_hw_state to understand why we always use 0
+        * as the DPLL id in this function.
+        */
+
+       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+       if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+               struct skl_wrpll_params wrpll_params = { 0, };
+
+               ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+               skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+
+               cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+                        DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+                        wrpll_params.dco_integer;
+
+               cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+                        DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+                        DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+                        DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+                        wrpll_params.central_freq;
+       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+               struct drm_encoder *encoder = &intel_encoder->base;
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               switch (intel_dp->link_bw) {
+               case DP_LINK_BW_1_62:
+                       ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+                       break;
+               case DP_LINK_BW_2_7:
+                       ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+                       break;
+               case DP_LINK_BW_5_4:
+                       ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+                       break;
+               }
+
+               cfgcr1 = cfgcr2 = 0;
+       } else /* eDP */
+               return true;
+
+       intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
+       intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
+       intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+
+       pll = intel_get_shared_dpll(intel_crtc);
+       if (pll == NULL) {
+               DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+                                pipe_name(intel_crtc->pipe));
+               return false;
+       }
+
+       /* shared DPLL id 0 is DPLL 1 */
+       intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+
+       return true;
+}
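
Note that ctrl1 is assembled with DPLL id 0 here and shifted by (dpll * 6) once the actual DPLL is known (see the skl_ddi_pll_enable() and intel_ddi_pre_enable() hunks below): DPLL_CTRL1 packs one 6-bit field group per DPLL, which is also why the readback path masks with 0x3f. A reconstruction of the implied layout, with bit positions inferred for illustration rather than quoted from bspec:

/* per-DPLL group starts at bit (id * 6) */
#define DEMO_DPLL_CTRL1_OVERRIDE(id)		(1 << ((id) * 6))
#define DEMO_DPLL_CTRL1_LINK_RATE(rate, id)	((rate) << ((id) * 6 + 1))
#define DEMO_DPLL_CTRL1_SSC(id)			(1 << ((id) * 6 + 4))
#define DEMO_DPLL_CTRL1_HDMI_MODE(id)		(1 << ((id) * 6 + 5))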
 
 /*
  * Tries to find a *shared* PLL for the CRTC and store it in
@@ -781,13 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
  */
 bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
-       struct drm_crtc *crtc = &intel_crtc->base;
-       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-       int clock = intel_crtc->config.port_clock;
-
-       intel_put_shared_dpll(intel_crtc);
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct intel_encoder *intel_encoder =
+               intel_ddi_get_crtc_new_encoder(intel_crtc);
+       int clock = intel_crtc->new_config->port_clock;
 
-       return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+       if (IS_SKYLAKE(dev))
+               return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+       else
+               return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
 }
 
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -962,7 +1348,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        uint32_t tmp;
 
        power_domain = intel_display_port_power_domain(intel_encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1394,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        int i;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1079,27 +1465,53 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
 
-       if (crtc->config.has_audio) {
-               DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
-                                pipe_name(crtc->pipe));
-
-               /* write eld */
-               DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
-               intel_write_eld(encoder, &crtc->config.adjusted_mode);
-       }
-
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                intel_edp_panel_on(intel_dp);
        }
 
-       WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
-       I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+       if (IS_SKYLAKE(dev)) {
+               uint32_t dpll = crtc->config.ddi_pll_sel;
+               uint32_t val;
+
+               /*
+                * DPLL0 is used for eDP and is the only "private" DPLL (as
+                * opposed to shared) on SKL
+                */
+               if (type == INTEL_OUTPUT_EDP) {
+                       WARN_ON(dpll != SKL_DPLL0);
+
+                       val = I915_READ(DPLL_CTRL1);
+
+                       val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+                                DPLL_CTRL1_SSC(dpll) |
+                                DPLL_CRTL1_LINK_RATE_MASK(dpll));
+                       val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+
+                       I915_WRITE(DPLL_CTRL1, val);
+                       POSTING_READ(DPLL_CTRL1);
+               }
+
+               /* DDI -> PLL mapping  */
+               val = I915_READ(DPLL_CTRL2);
+
+               val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+                       DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+               val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+               I915_WRITE(DPLL_CTRL2, val);
+
+       } else {
+               WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+               I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+       }
 
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1109,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
-               if (port != PORT_A)
+               if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
                        intel_dp_stop_link_train(intel_dp);
        } else if (type == INTEL_OUTPUT_HDMI) {
                struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1123,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
        uint32_t val;
@@ -1151,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
                intel_edp_panel_off(intel_dp);
        }
 
-       I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+       if (IS_SKYLAKE(dev))
+               I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+                                       DPLL_CTRL2_DDI_CLK_OFF(port)));
+       else
+               I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
 }
 
 static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1159,12 +1576,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
-       uint32_t tmp;
 
        if (type == INTEL_OUTPUT_HDMI) {
                struct intel_digital_port *intel_dig_port =
@@ -1180,18 +1595,16 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        } else if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-               if (port == PORT_A)
+               if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
                        intel_dp_stop_link_train(intel_dp);
 
                intel_edp_backlight_on(intel_dp);
-               intel_edp_psr_enable(intel_dp);
+               intel_psr_enable(intel_dp);
        }
 
        if (intel_crtc->config.has_audio) {
                intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-               tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-               tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
-               I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+               intel_audio_codec_enable(intel_encoder);
        }
 }
 
@@ -1200,30 +1613,71 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
        int type = intel_encoder->type;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
 
-       /* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
-        * register is part of the power well on Haswell. */
        if (intel_crtc->config.has_audio) {
-               tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-               tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
-                        (pipe * 4));
-               I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+               intel_audio_codec_disable(intel_encoder);
                intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
        }
 
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-               intel_edp_psr_disable(intel_dp);
+               intel_psr_disable(intel_dp);
                intel_edp_backlight_off(intel_dp);
        }
 }
 
+static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+       uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+       uint32_t cdctl = I915_READ(CDCLK_CTL);
+       uint32_t linkrate;
+
+       if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+               WARN(1, "LCPLL1 not enabled\n");
+               return 24000; /* 24MHz is the cd freq with NSSC ref */
+       }
+
+       if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+               return 540000;
+
+       linkrate = (I915_READ(DPLL_CTRL1) &
+                   DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+       if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+           linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+               /* vco 8640 */
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       return 432000;
+               case CDCLK_FREQ_337_308:
+                       return 308570;
+               case CDCLK_FREQ_675_617:
+                       return 617140;
+               default:
+                       WARN(1, "Unknown cd freq selection\n");
+               }
+       } else {
+               /* vco 8100 */
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       return 450000;
+               case CDCLK_FREQ_337_308:
+                       return 337500;
+               case CDCLK_FREQ_675_617:
+                       return 675000;
+               default:
+                       WARN(1, "Unknown cd freq selection\n");
+               }
+       }
+
+       /* error case, do as if DPLL0 isn't enabled */
+       return 24000;
+}
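
The paired return values reflect the two possible DPLL0 VCO frequencies: at 8640 MHz the cdclk selections decode to 432, ~308.57 and ~617.14 MHz, at 8100 MHz to 450, 337.5 and 675 MHz. A quick standalone check, with the divider values inferred from the numbers rather than quoted from bspec; the printed results agree with the returns above except that the driver hardcodes the truncated 308570 and 617140:

#include <stdio.h>

int main(void)
{
	/* DPLL0 VCO at 8640 MHz, results in kHz */
	printf("%d %d %d\n", 8640000 / 20, 8640000 / 28, 8640000 / 14);
	/* DPLL0 VCO at 8100 MHz, results in kHz */
	printf("%d %d %d\n", 8100000 / 18, 8100000 / 24, 8100000 / 12);
	return 0;
}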
+
 static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
        uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1255,7 +1709,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
                return 450000;
        else if (freq == LCPLL_CLK_FREQ_450)
                return 450000;
-       else if (IS_ULT(dev))
+       else if (IS_HSW_ULT(dev))
                return 337500;
        else
                return 540000;
@@ -1265,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
 
+       if (IS_SKYLAKE(dev))
+               return skl_get_cdclk_freq(dev_priv);
+
        if (IS_BROADWELL(dev))
                return bdw_get_cdclk_freq(dev_priv);
 
@@ -1275,7 +1732,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
                               struct intel_shared_dpll *pll)
 {
-       I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+       I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
        POSTING_READ(WRPLL_CTL(pll->id));
        udelay(20);
 }
@@ -1296,7 +1753,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(WRPLL_CTL(pll->id));
@@ -1326,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
        }
 }
 
+static const char * const skl_ddi_pll_names[] = {
+       "DPLL 1",
+       "DPLL 2",
+       "DPLL 3",
+};
+
+struct skl_dpll_regs {
+       u32 ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[3] = {
+       {
+               /* DPLL 1 */
+               .ctl = LCPLL2_CTL,
+               .cfgcr1 = DPLL1_CFGCR1,
+               .cfgcr2 = DPLL1_CFGCR2,
+       },
+       {
+               /* DPLL 2 */
+               .ctl = WRPLL_CTL1,
+               .cfgcr1 = DPLL2_CFGCR1,
+               .cfgcr2 = DPLL2_CFGCR2,
+       },
+       {
+               /* DPLL 3 */
+               .ctl = WRPLL_CTL2,
+               .cfgcr1 = DPLL3_CFGCR1,
+               .cfgcr2 = DPLL3_CFGCR2,
+       },
+};
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+                              struct intel_shared_dpll *pll)
+{
+       uint32_t val;
+       unsigned int dpll;
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+       /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+       dpll = pll->id + 1;
+
+       val = I915_READ(DPLL_CTRL1);
+
+       val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
+                DPLL_CRTL1_LINK_RATE_MASK(dpll));
+       val |= pll->config.hw_state.ctrl1 << (dpll * 6);
+
+       I915_WRITE(DPLL_CTRL1, val);
+       POSTING_READ(DPLL_CTRL1);
+
+       I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+       I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+       POSTING_READ(regs[pll->id].cfgcr1);
+       POSTING_READ(regs[pll->id].cfgcr2);
+
+       /* the enable bit is always bit 31 */
+       I915_WRITE(regs[pll->id].ctl,
+                  I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+       if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
+               DRM_ERROR("DPLL %d not locked\n", dpll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+       /* the enable bit is always bit 31 */
+       I915_WRITE(regs[pll->id].ctl,
+                  I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+       POSTING_READ(regs[pll->id].ctl);
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                    struct intel_shared_dpll *pll,
+                                    struct intel_dpll_hw_state *hw_state)
+{
+       uint32_t val;
+       unsigned int dpll;
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+               return false;
+
+       /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+       dpll = pll->id + 1;
+
+       val = I915_READ(regs[pll->id].ctl);
+       if (!(val & LCPLL_PLL_ENABLE))
+               return false;
+
+       val = I915_READ(DPLL_CTRL1);
+       hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
+
+       /* avoid reading back stale values if HDMI mode is not enabled */
+       if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
+               hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+               hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+       }
+
+       return true;
+}
+
+static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+       int i;
+
+       dev_priv->num_shared_dpll = 3;
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               dev_priv->shared_dplls[i].id = i;
+               dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
+               dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
+               dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
+               dev_priv->shared_dplls[i].get_hw_state =
+                       skl_ddi_pll_get_hw_state;
+       }
+}
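
skl_shared_dplls_init() only fills in the hook table; the generic shared-DPLL code decides when each hook runs. Roughly, condensed from the intel_enable_shared_dpll() pattern visible later in this diff (a sketch, not a verbatim copy):

static void demo_enable_shared_dpll(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	if (pll->active++ == 0) {
		WARN_ON(pll->on);
		pll->enable(dev_priv, pll);	/* skl_ddi_pll_enable() on SKL */
		pll->on = true;
	}
}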
+
 void intel_ddi_pll_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val = I915_READ(LCPLL_CTL);
 
-       hsw_shared_dplls_init(dev_priv);
-
-       /* The LCPLL register should be turned on by the BIOS. For now let's
-        * just check its state and print errors in case something is wrong.
-        * Don't even try to turn it on.
-        */
+       if (IS_SKYLAKE(dev))
+               skl_shared_dplls_init(dev_priv);
+       else
+               hsw_shared_dplls_init(dev_priv);
 
        DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
                      intel_ddi_get_cdclk_freq(dev_priv));
 
-       if (val & LCPLL_CD_SOURCE_FCLK)
-               DRM_ERROR("CDCLK source is not LCPLL\n");
+       if (IS_SKYLAKE(dev)) {
+               if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+                       DRM_ERROR("LCPLL1 is disabled\n");
+       } else {
+               /*
+                * The LCPLL register should be turned on by the BIOS. For now
+                * let's just check its state and print errors in case
+                * something is wrong.  Don't even try to turn it on.
+                */
 
-       if (val & LCPLL_PLL_DISABLE)
-               DRM_ERROR("LCPLL is disabled\n");
+               if (val & LCPLL_CD_SOURCE_FCLK)
+                       DRM_ERROR("CDCLK source is not LCPLL\n");
+
+               if (val & LCPLL_PLL_DISABLE)
+                       DRM_ERROR("LCPLL is disabled\n");
+       }
 }
 
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1440,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+       struct intel_hdmi *intel_hdmi;
        u32 temp, flags = 0;
+       struct drm_device *dev = dev_priv->dev;
 
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (temp & TRANS_DDI_PHSYNC)
@@ -1474,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
        case TRANS_DDI_MODE_SELECT_HDMI:
                pipe_config->has_hdmi_sink = true;
+               intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+
+               if (intel_hdmi->infoframe_enabled(&encoder->base))
+                       pipe_config->has_infoframe = true;
+               break;
        case TRANS_DDI_MODE_SELECT_DVI:
        case TRANS_DDI_MODE_SELECT_FDI:
                break;
@@ -1486,9 +2080,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        }
 
-       if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+       if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
                temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-               if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+               if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
                        pipe_config->has_audio = true;
        }
 
@@ -1512,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
 
-       hsw_ddi_clock_get(encoder, pipe_config);
+       if (INTEL_INFO(dev)->gen <= 8)
+               hsw_ddi_clock_get(encoder, pipe_config);
+       else
+               skl_ddi_clock_get(encoder, pipe_config);
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0a1a56406ebde9bf7119b4a56280348805c533f..63247c64b1e05efbd766e97056b272b064773fca 100644
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -96,8 +94,10 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 static void haswell_set_pipeconf(struct drm_crtc *crtc);
 static void intel_set_pipe_csc(struct drm_crtc *crtc);
-static void vlv_prepare_pll(struct intel_crtc *crtc);
-static void chv_prepare_pll(struct intel_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_config *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_config *pipe_config);
 
 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
 {
@@ -408,25 +408,43 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
-static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                if (encoder->type == type)
                        return true;
 
        return false;
 }
 
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at encoder->new_crtc instead of
+ * encoder->crtc.
+ */
+static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(dev, encoder)
+               if (encoder->new_crtc == crtc && encoder->type == type)
+                       return true;
+
+       return false;
+}
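
The new helper exists because during a modeset the staged encoder-to-CRTC mapping lives in encoder->new_crtc until the commit phase copies it into encoder->crtc, so limit and clock code that runs before the commit must inspect the staged pointer. For illustration, an assumed helper (not in the driver) that checks both views at once:

static bool demo_crtc_keeps_type(struct intel_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_intel_encoder(dev, encoder)
		if (encoder->base.crtc == &crtc->base &&	/* has it now */
		    encoder->new_crtc == crtc &&		/* keeps it */
		    encoder->type == type)
			return true;

	return false;
}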
+
+static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
                                                int refclk)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        const intel_limit_t *limit;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                if (intel_is_dual_link_lvds(dev)) {
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -444,20 +462,20 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
        return limit;
 }
 
-static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        const intel_limit_t *limit;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                if (intel_is_dual_link_lvds(dev))
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        limit = &intel_limits_g4x_single_channel_lvds;
-       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
-                  intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+       } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
+                  intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
-       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+       } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;
@@ -465,9 +483,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
        return limit;
 }
 
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        const intel_limit_t *limit;
 
        if (HAS_PCH_SPLIT(dev))
@@ -475,7 +493,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
        else if (IS_G4X(dev)) {
                limit = intel_g4x_limit(crtc);
        } else if (IS_PINEVIEW(dev)) {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+               if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_pineview_lvds;
                else
                        limit = &intel_limits_pineview_sdvo;
@@ -484,14 +502,14 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
        } else if (IS_VALLEYVIEW(dev)) {
                limit = &intel_limits_vlv;
        } else if (!IS_GEN2(dev)) {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+               if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i9xx_lvds;
                else
                        limit = &intel_limits_i9xx_sdvo;
        } else {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+               if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i8xx_lvds;
-               else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
+               else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
                        limit = &intel_limits_i8xx_dvo;
                else
                        limit = &intel_limits_i8xx_dac;
@@ -578,15 +596,15 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 }
 
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
                    int target, int refclk, intel_clock_t *match_clock,
                    intel_clock_t *best_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock;
        int err = target;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
@@ -639,15 +657,15 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 }
 
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock;
        int err = target;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
@@ -698,11 +716,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 }
 
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock;
        int max_n;
        bool found;
@@ -710,7 +728,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
        int err_most = (target >> 8) + (target >> 9);
        found = false;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                if (intel_is_dual_link_lvds(dev))
                        clock.p2 = limit->p2.p2_fast;
                else
@@ -755,11 +773,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 }
 
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
@@ -812,11 +830,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 }
 
 static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock;
        uint64_t m2;
        int found = false;
@@ -889,60 +907,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return intel_crtc->config.cpu_transcoder;
 }
 
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
-       frame = I915_READ(frame_reg);
-
-       if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
-               WARN(1, "vblank wait on pipe %c timed out\n",
-                    pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe.  Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipestat_reg = PIPESTAT(pipe);
-
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-               g4x_wait_for_vblank(dev, pipe);
-               return;
-       }
-
-       /* Clear existing vblank status. Note this will clear any other
-        * sticky status fields as well.
-        *
-        * This races with i915_driver_irq_handler() with the result
-        * that either function could miss a vblank event.  Here it is not
-        * fatal, as we will either wait upon the next vblank interrupt or
-        * timeout.  Generally speaking intel_wait_for_vblank() is only
-        * called during modeset at which time the GPU should be idle and
-        * should *not* be performing page flips and thus not waiting on
-        * vblanks...
-        * Currently, the result of us stealing a vblank from the irq
-        * handler is that a single frame will be skipped during swapbuffers.
-        */
-       I915_WRITE(pipestat_reg,
-                  I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
-       /* Wait for vblank interrupt bit to set */
-       if (wait_for(I915_READ(pipestat_reg) &
-                    PIPE_VBLANK_INTERRUPT_STATUS,
-                    50))
-               DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
-                             pipe_name(pipe));
-}
-
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1153,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
             state_string(state), state_string(cur_state));
 }
 
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-                                 enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+                          enum pipe pipe)
 {
        struct drm_device *dev = dev_priv->dev;
        int pp_reg;
@@ -1263,7 +1227,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                cur_state = false;
        } else {
@@ -1332,7 +1296,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
        int reg, sprite;
        u32 val;
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               for_each_sprite(pipe, sprite) {
+                       val = I915_READ(PLANE_CTL(pipe, sprite));
+                       WARN(val & PLANE_CTL_ENABLE,
+                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
+                            sprite, pipe_name(pipe));
+               }
+       } else if (IS_VALLEYVIEW(dev)) {
                for_each_sprite(pipe, sprite) {
                        reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
@@ -1533,12 +1504,13 @@ static void intel_init_dpio(struct drm_device *dev)
        }
 }
 
-static void vlv_enable_pll(struct intel_crtc *crtc)
+static void vlv_enable_pll(struct intel_crtc *crtc,
+                          const struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = DPLL(crtc->pipe);
-       u32 dpll = crtc->config.dpll_hw_state.dpll;
+       u32 dpll = pipe_config->dpll_hw_state.dpll;
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
 
@@ -1556,7 +1528,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
        if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
 
-       I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+       I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(crtc->pipe));
 
        /* We do this three times for luck */
@@ -1571,7 +1543,8 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
        udelay(150); /* wait for warmup */
 }
 
-static void chv_enable_pll(struct intel_crtc *crtc)
+static void chv_enable_pll(struct intel_crtc *crtc,
+                          const struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1596,14 +1569,14 @@ static void chv_enable_pll(struct intel_crtc *crtc)
        udelay(1);
 
        /* Enable PLL */
-       I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
        /* Check PLL is locked */
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
 
        /* not sure when this should be written */
-       I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+       I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
 
        mutex_unlock(&dev_priv->dpio_lock);
@@ -1616,7 +1589,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
 
        for_each_intel_crtc(dev, crtc)
                count += crtc->active &&
-                       intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+                       intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
        return count;
 }
@@ -1695,7 +1668,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
 
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
-           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
            intel_num_dvo_pipes(dev) == 1) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1806,7 +1779,7 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
        if (WARN_ON(pll == NULL))
                return;
 
-       WARN_ON(!pll->refcount);
+       WARN_ON(!pll->config.crtc_mask);
        if (pll->active == 0) {
                DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
                WARN_ON(pll->on);
@@ -1833,7 +1806,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
        if (WARN_ON(pll == NULL))
                return;
 
-       if (WARN_ON(pll->refcount == 0))
+       if (WARN_ON(pll->config.crtc_mask == 0))
                return;
 
        DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
@@ -1865,7 +1838,7 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
        if (WARN_ON(pll == NULL))
               return;
 
-       if (WARN_ON(pll->refcount == 0))
+       if (WARN_ON(pll->config.crtc_mask == 0))
                return;
 
        DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -1933,7 +1906,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
                if (HAS_PCH_IBX(dev_priv->dev) &&
-                   intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
@@ -2056,7 +2029,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
         * need the check.
         */
        if (!HAS_PCH_SPLIT(dev_priv->dev))
-               if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -2221,11 +2194,13 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
 }
 
 int
-intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                          struct drm_i915_gem_object *obj,
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+                          struct drm_framebuffer *fb,
                           struct intel_engine_cs *pipelined)
 {
+       struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 alignment;
        int ret;
 
@@ -2233,7 +2208,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
-               if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
@@ -2241,8 +2218,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
-               /* pin() will align the object as required by fence */
-               alignment = 0;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else {
+                       /* pin() will align the object as required by fence */
+                       alignment = 0;
+               }
                break;
        case I915_TILING_Y:
                WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2402,6 +2383,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
                                 struct intel_plane_config *plane_config)
 {
        struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *c;
        struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
@@ -2433,6 +2415,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
                        continue;
 
                if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+                       if (obj->tiling_mode != I915_TILING_NONE)
+                               dev_priv->preserve_bios_swizzle = true;
+
                        drm_framebuffer_reference(c->primary->fb);
                        intel_crtc->base.primary->fb = c->primary->fb;
                        obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -2486,6 +2471,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
                           ((intel_crtc->config.pipe_src_h - 1) << 16) |
                           (intel_crtc->config.pipe_src_w - 1));
                I915_WRITE(DSPPOS(plane), 0);
+       } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+               I915_WRITE(PRIMSIZE(plane),
+                          ((intel_crtc->config.pipe_src_h - 1) << 16) |
+                          (intel_crtc->config.pipe_src_w - 1));
+               I915_WRITE(PRIMPOS(plane), 0);
+               I915_WRITE(PRIMCNSTALPHA(plane), 0);
        }
 
        switch (fb->pixel_format) {
@@ -2672,6 +2663,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(reg);
 }
 
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj;
+       int pipe = intel_crtc->pipe;
+       u32 plane_ctl, stride;
+
+       if (!intel_crtc->primary_enabled) {
+               I915_WRITE(PLANE_CTL(pipe, 0), 0);
+               I915_WRITE(PLANE_SURF(pipe, 0), 0);
+               POSTING_READ(PLANE_CTL(pipe, 0));
+               return;
+       }
+
+       plane_ctl = PLANE_CTL_ENABLE |
+                   PLANE_CTL_PIPE_GAMMA_ENABLE |
+                   PLANE_CTL_PIPE_CSC_ENABLE;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       default:
+               BUG();
+       }
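+       /*
+        * The XBGR variants above reuse the matching XRGB format bit and
+        * only flip the channel order via PLANE_CTL_ORDER_RGBX.
+        */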
+
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+
+       /*
+        * The stride is expressed either in units of 64 byte chunks for
+        * linear buffers or in number of tiles for tiled buffers.
+        */
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
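+       /*
+        * Worked example (illustrative, not part of the patch): a
+        * 1920-pixel wide XRGB8888 fb has pitches[0] = 1920 * 4 = 7680
+        * bytes, so the stride field is 7680 >> 6 = 120 for a linear
+        * buffer, or 7680 >> 9 = 15 tiles when X tiled.
+        */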
+
+       plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+       if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+               plane_ctl |= PLANE_CTL_ROTATE_180;
+
+       I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+       DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+                     i915_gem_obj_ggtt_offset(obj),
+                     x, y, fb->width, fb->height,
+                     fb->pitches[0]);
+
+       I915_WRITE(PLANE_POS(pipe, 0), 0);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+       I915_WRITE(PLANE_SIZE(pipe, 0),
+                  (intel_crtc->config.pipe_src_h - 1) << 16 |
+                  (intel_crtc->config.pipe_src_w - 1));
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+       POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,32 +2759,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
-       intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
        return 0;
 }
 
-void intel_display_handle_reset(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
 
-       /*
-        * Flips in the rings have been nuked by the reset,
-        * so complete all pending flips so that user space
-        * will get its events and not get stuck.
-        *
-        * Also update the base address of all primary
-        * planes to the the last fb to make sure we're
-        * showing the correct fb after a reset.
-        *
-        * Need to make two loops over the crtcs so that we
-        * don't try to grab a crtc mutex before the
-        * pending_flip_queue really got woken up.
-        */
-
        for_each_crtc(dev, crtc) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                enum plane plane = intel_crtc->plane;
@@ -2715,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
                intel_prepare_page_flip(dev, plane);
                intel_finish_page_flip_plane(dev, plane);
        }
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
 
        for_each_crtc(dev, crtc) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2734,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
        }
 }
 
+void intel_prepare_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc;
+
+       /* no reset support for gen2 */
+       if (IS_GEN2(dev))
+               return;
+
+       /* reset doesn't touch the display */
+       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+               return;
+
+       drm_modeset_lock_all(dev);
+
+       /*
+        * Disabling the crtcs gracefully seems nicer. Also the
+        * g33 docs say we should at least disable all the planes.
+        */
+       for_each_intel_crtc(dev, crtc) {
+               if (crtc->active)
+                       dev_priv->display.crtc_disable(&crtc->base);
+       }
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       /*
+        * Flips in the rings will be nuked by the reset,
+        * so complete all pending flips so that user space
+        * will get its events and not get stuck.
+        */
+       intel_complete_page_flips(dev);
+
+       /* no reset support for gen2 */
+       if (IS_GEN2(dev))
+               return;
+
+       /* reset doesn't touch the display */
+       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+               /*
+                * Flips in the rings have been nuked by the reset,
+                * so update the base address of all primary
+                * planes to the last fb to make sure we're
+                * showing the correct fb after a reset.
+                */
+               intel_update_primary_planes(dev);
+               return;
+       }
+
+       /*
+        * The display has been reset as well,
+        * so we need a full re-initialization.
+        */
+       intel_runtime_pm_disable_interrupts(dev_priv);
+       intel_runtime_pm_enable_interrupts(dev_priv);
+
+       intel_modeset_init_hw(dev);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       intel_modeset_setup_hw_state(dev, true);
+
+       intel_hpd_init(dev_priv);
+
+       drm_modeset_unlock_all(dev);
+}
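+
+/*
+ * The two hooks above are meant to bracket the actual GPU reset; the
+ * reset path itself is outside this hunk, but the expected call order
+ * is roughly:
+ *
+ *     intel_prepare_reset(dev);
+ *     ...perform the GPU reset...
+ *     intel_finish_reset(dev);
+ */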
+
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
@@ -2762,20 +2902,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
        bool pending;
 
        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
                return false;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        pending = to_intel_crtc(crtc)->unpin_work != NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        return pending;
 }
 
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_display_mode *adjusted_mode;
+
+       if (!i915.fastboot)
+               return;
+
+       /*
+        * Update pipe size and adjust fitter if needed: the reason for this is
+        * that in compute_mode_changes we check the native mode (not the pfit
+        * mode) to see if we can flip rather than do a full mode set. In the
+        * fastboot case, we'll flip, but if we don't update the pipesrc and
+        * pfit state, we'll end up with a big fb scanned out into the wrong
+        * sized surface.
+        *
+        * To fix this properly, we need to hoist the checks up into
+        * compute_mode_changes (or above), check the actual pfit state and
+        * whether the platform allows pfit disable with pipe active, and only
+        * then update the pipesrc and pfit state, even on the flip path.
+        */
+
+       adjusted_mode = &crtc->config.adjusted_mode;
+
+       I915_WRITE(PIPESRC(crtc->pipe),
+                  ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+                  (adjusted_mode->crtc_vdisplay - 1));
+       if (!crtc->config.pch_pfit.enabled &&
+           (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+               I915_WRITE(PF_CTL(crtc->pipe), 0);
+               I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+               I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+       }
+       crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+       crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *fb)
@@ -2785,7 +2963,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct drm_framebuffer *old_fb = crtc->primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
        int ret;
 
@@ -2808,9 +2985,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        }
 
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+       ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
        if (ret == 0)
-               i915_gem_track_fb(old_obj, obj,
+               i915_gem_track_fb(old_obj, intel_fb_obj(fb),
                                  INTEL_FRONTBUFFER_PRIMARY(pipe));
        mutex_unlock(&dev->struct_mutex);
        if (ret != 0) {
@@ -2818,37 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       /*
-        * Update pipe size and adjust fitter if needed: the reason for this is
-        * that in compute_mode_changes we check the native mode (not the pfit
-        * mode) to see if we can flip rather than do a full mode set. In the
-        * fastboot case, we'll flip, but if we don't update the pipesrc and
-        * pfit state, we'll end up with a big fb scanned out into the wrong
-        * sized surface.
-        *
-        * To fix this properly, we need to hoist the checks up into
-        * compute_mode_changes (or above), check the actual pfit state and
-        * whether the platform allows pfit disable with pipe active, and only
-        * then update the pipesrc and pfit state, even on the flip path.
-        */
-       if (i915.fastboot) {
-               const struct drm_display_mode *adjusted_mode =
-                       &intel_crtc->config.adjusted_mode;
-
-               I915_WRITE(PIPESRC(intel_crtc->pipe),
-                          ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-                          (adjusted_mode->crtc_vdisplay - 1));
-               if (!intel_crtc->config.pch_pfit.enabled &&
-                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-                       I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
-                       I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
-                       I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
-               }
-               intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-               intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-       }
-
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
        if (intel_crtc->active)
@@ -3472,14 +3618,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                                       !intel_crtc_has_pending_flip(crtc),
                                       60*HZ) == 0)) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               unsigned long flags;
 
-               spin_lock_irqsave(&dev->event_lock, flags);
+               spin_lock_irq(&dev->event_lock);
                if (intel_crtc->unpin_work) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
-               spin_unlock_irqrestore(&dev->event_lock, flags);
+               spin_unlock_irq(&dev->event_lock);
        }
 
        if (crtc->primary->fb) {
@@ -3704,9 +3849,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        intel_fdi_normal_train(crtc);
 
        /* For PCH DP, enable TRANS_DP_CTL */
-       if (HAS_PCH_CPT(dev) &&
-           (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+       if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
@@ -3766,12 +3909,13 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
        if (pll == NULL)
                return;
 
-       if (pll->refcount == 0) {
-               WARN(1, "bad %s refcount\n", pll->name);
+       if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
+               WARN(1, "bad %s crtc mask\n", pll->name);
                return;
        }
 
-       if (--pll->refcount == 0) {
+       pll->config.crtc_mask &= ~(1 << crtc->pipe);
+       if (pll->config.crtc_mask == 0) {
                WARN_ON(pll->on);
                WARN_ON(pll->active);
        }
@@ -3782,15 +3926,9 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+       struct intel_shared_dpll *pll;
        enum intel_dpll_id i;
 
-       if (pll) {
-               DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
-                             crtc->base.base.id, pll->name);
-               intel_put_shared_dpll(crtc);
-       }
-
        if (HAS_PCH_IBX(dev_priv->dev)) {
                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
                i = (enum intel_dpll_id) crtc->pipe;
@@ -3799,7 +3937,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
                              crtc->base.base.id, pll->name);
 
-               WARN_ON(pll->refcount);
+               WARN_ON(pll->new_config->crtc_mask);
 
                goto found;
        }
@@ -3808,15 +3946,16 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
                pll = &dev_priv->shared_dplls[i];
 
                /* Only want to check enabled timings first */
-               if (pll->refcount == 0)
+               if (pll->new_config->crtc_mask == 0)
                        continue;
 
-               if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
-                          sizeof(pll->hw_state)) == 0) {
-                       DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
-                                     crtc->base.base.id,
-                                     pll->name, pll->refcount, pll->active);
-
+               if (memcmp(&crtc->new_config->dpll_hw_state,
+                          &pll->new_config->hw_state,
+                          sizeof(pll->new_config->hw_state)) == 0) {
+                       DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
+                                     crtc->base.base.id, pll->name,
+                                     pll->new_config->crtc_mask,
+                                     pll->active);
                        goto found;
                }
        }
@@ -3824,7 +3963,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
        /* Ok no matching timings, maybe there's a free one? */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                pll = &dev_priv->shared_dplls[i];
-               if (pll->refcount == 0) {
+               if (pll->new_config->crtc_mask == 0) {
                        DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
                                      crtc->base.base.id, pll->name);
                        goto found;
@@ -3834,39 +3973,120 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
        return NULL;
 
 found:
-       if (pll->refcount == 0)
-               pll->hw_state = crtc->config.dpll_hw_state;
+       if (pll->new_config->crtc_mask == 0)
+               pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
 
-       crtc->config.shared_dpll = i;
+       crtc->new_config->shared_dpll = i;
        DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
                         pipe_name(crtc->pipe));
 
-       pll->refcount++;
+       pll->new_config->crtc_mask |= 1 << crtc->pipe;
 
        return pll;
 }
 
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+/**
+ * intel_shared_dpll_start_config - start a new PLL staged config
+ * @dev_priv: i915 private structure
+ * @clear_pipes: mask of pipes that will have their PLLs freed
+ *
+ * Starts a new PLL staged config, copying the current config but
+ * releasing the references of the pipes specified in @clear_pipes.
+ */
+static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
+                                         unsigned clear_pipes)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int dslreg = PIPEDSL(pipe);
-       u32 temp;
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id i;
 
-       temp = I915_READ(dslreg);
-       udelay(500);
-       if (wait_for(I915_READ(dslreg) != temp, 5)) {
-               if (wait_for(I915_READ(dslreg) != temp, 5))
-                       DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               pll->new_config = kmemdup(&pll->config, sizeof(pll->config),
+                                         GFP_KERNEL);
+               if (!pll->new_config)
+                       goto cleanup;
+
+               pll->new_config->crtc_mask &= ~clear_pipes;
+       }
+
+       return 0;
+
+cleanup:
+       while (--i >= 0) {
+               pll = &dev_priv->shared_dplls[i];
+               kfree(pll->new_config);
+               pll->new_config = NULL;
        }
+
+       return -ENOMEM;
 }
 
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id i;
 
-       if (crtc->config.pch_pfit.enabled) {
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               WARN_ON(pll->new_config == &pll->config);
+
+               pll->config = *pll->new_config;
+               kfree(pll->new_config);
+               pll->new_config = NULL;
+       }
+}
+
+static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
+{
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id i;
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               WARN_ON(pll->new_config == &pll->config);
+
+               kfree(pll->new_config);
+               pll->new_config = NULL;
+       }
+}
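+
+/*
+ * The three helpers above form a simple transaction over the shared
+ * DPLL state; a modeset is expected to go roughly (sketch):
+ *
+ *     intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ *     ...assign PLLs via intel_get_shared_dpll()...
+ *     intel_shared_dpll_commit(dev_priv);        // on success
+ * or
+ *     intel_shared_dpll_abort_config(dev_priv);  // on failure
+ */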
+
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int dslreg = PIPEDSL(pipe);
+       u32 temp;
+
+       temp = I915_READ(dslreg);
+       udelay(500);
+       if (wait_for(I915_READ(dslreg) != temp, 5)) {
+               if (wait_for(I915_READ(dslreg) != temp, 5))
+                       DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+       }
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = crtc->pipe;
+
+       if (crtc->config.pch_pfit.enabled) {
+               I915_WRITE(PS_CTL(pipe), PS_ENABLE);
+               I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+               I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+       }
+}
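+
+/* On SKL the panel fitter is programmed through the pipe scaler (PS_*)
+ * registers; pre-SKL platforms keep using the PF_* path below. */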
+
+static void ironlake_pfit_enable(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = crtc->pipe;
+
+       if (crtc->config.pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
@@ -3983,7 +4203,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
                return;
 
        if (!HAS_PCH_SPLIT(dev_priv->dev)) {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -4038,10 +4258,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
-       assert_vblank_disabled(crtc);
-
-       drm_vblank_on(dev, pipe);
-
        intel_enable_primary_hw_plane(crtc->primary, crtc);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4303,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
-       drm_vblank_off(dev, pipe);
-
-       assert_vblank_disabled(crtc);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4123,8 +4335,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc->active = true;
 
-       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
-       intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
@@ -4160,6 +4372,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 }
 
@@ -4235,19 +4450,23 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc->active = true;
 
-       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
 
        if (intel_crtc->config.has_pch_encoder) {
-               intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+               intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+                                                     true);
                dev_priv->display.fdi_link_train(crtc);
        }
 
        intel_ddi_enable_pipe_clock(intel_crtc);
 
-       ironlake_pfit_enable(intel_crtc);
+       if (IS_SKYLAKE(dev))
+               skylake_pfit_enable(intel_crtc);
+       else
+               ironlake_pfit_enable(intel_crtc);
 
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
@@ -4272,12 +4491,30 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_opregion_notify_encoder(encoder, true);
        }
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
        intel_crtc_enable_planes(crtc);
 }
 
+static void skylake_pfit_disable(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = crtc->pipe;
+
+       /* Only disable the pfit if it's actually in use, to avoid upsetting
+        * the power well. The hw state code will make sure we get this right. */
+       if (crtc->config.pch_pfit.enabled) {
+               I915_WRITE(PS_CTL(pipe), 0);
+               I915_WRITE(PS_WIN_POS(pipe), 0);
+               I915_WRITE(PS_WIN_SZ(pipe), 0);
+       }
+}
+
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
@@ -4307,11 +4544,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
 
        if (intel_crtc->config.has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
        intel_disable_pipe(intel_crtc);
 
@@ -4325,7 +4565,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                ironlake_fdi_disable(crtc);
 
                ironlake_disable_pch_transcoder(dev_priv, pipe);
-               intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
                if (HAS_PCH_CPT(dev)) {
                        /* disable TRANS_DP_CTL */
@@ -4369,13 +4608,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
        }
 
        if (intel_crtc->config.has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+               intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+                                                     false);
        intel_disable_pipe(intel_crtc);
 
        if (intel_crtc->config.dp_encoder_is_mst)
@@ -4383,13 +4626,15 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
-       ironlake_pfit_disable(intel_crtc);
+       if (IS_SKYLAKE(dev))
+               skylake_pfit_disable(intel_crtc);
+       else
+               ironlake_pfit_disable(intel_crtc);
 
        intel_ddi_disable_pipe_clock(intel_crtc);
 
        if (intel_crtc->config.has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
-               intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
                intel_ddi_fdi_disable(crtc);
        }
 
@@ -4510,20 +4755,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
        return mask;
 }
 
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-                                 bool enable)
-{
-       if (dev_priv->power_domains.init_power_on == enable)
-               return;
-
-       if (enable)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-       else
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       dev_priv->power_domains.init_power_on = enable;
-}
-
 static void modeset_update_crtc_power_domains(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4546,6 +4777,9 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
                        intel_display_power_get(dev_priv, domain);
        }
 
+       if (dev_priv->display.modeset_global_resources)
+               dev_priv->display.modeset_global_resources(dev);
+
        for_each_intel_crtc(dev, crtc) {
                enum intel_display_power_domain domain;
 
@@ -4577,7 +4811,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-       DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+       DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
                         dev_priv->vlv_cdclk_freq);
 
        /*
@@ -4616,10 +4850,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        if (cdclk == 400000) {
-               u32 divider, vco;
+               u32 divider;
 
-               vco = valleyview_get_vco(dev_priv);
-               divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
+               divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
                mutex_lock(&dev_priv->dpio_lock);
                /* adjust cdclk divider */
@@ -4698,8 +4931,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
                                 int max_pixclk)
 {
-       int vco = valleyview_get_vco(dev_priv);
-       int freq_320 = (vco <<  1) % 320000 != 0 ? 333333 : 320000;
+       int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
 
        /* FIXME: Punit isn't quite ready yet */
        if (IS_CHERRYVIEW(dev_priv->dev))
@@ -4768,18 +5000,30 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
        int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
        if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+               /*
+                * FIXME: We can end up here with all power domains off, yet
+                * with a CDCLK frequency other than the minimum. To account
+                * for this take the PIPE-A power domain, which covers the HW
+                * blocks needed for the following programming. This can be
+                * removed once it's guaranteed that we get here either with
+                * the minimum CDCLK set, or the required power domains
+                * enabled.
+                */
+               intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
                if (IS_CHERRYVIEW(dev))
                        cherryview_set_cdclk(dev, req_cdclk);
                else
                        valleyview_set_cdclk(dev, req_cdclk);
-       }
 
-       modeset_update_crtc_power_domains(dev);
+               intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+       }
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -4790,13 +5034,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->active)
                return;
 
-       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+       is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
        if (!is_dsi) {
                if (IS_CHERRYVIEW(dev))
-                       chv_prepare_pll(intel_crtc);
+                       chv_prepare_pll(intel_crtc, &intel_crtc->config);
                else
-                       vlv_prepare_pll(intel_crtc);
+                       vlv_prepare_pll(intel_crtc, &intel_crtc->config);
        }
 
        if (intel_crtc->config.has_dp_encoder)
@@ -4804,11 +5048,18 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_set_pipe_timings(intel_crtc);
 
+       if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+
+               I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+               I915_WRITE(CHV_CANVAS(pipe), 0);
+       }
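+       /*
+        * Pipe B's primary plane on CHV also gets the sprite-style PRIM*
+        * registers programmed in i9xx_update_primary_plane() above;
+        * CHV_BLEND_LEGACY presumably keeps its blending consistent with
+        * the other pipes (assumption, not spelled out in the patch).
+        */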
+
        i9xx_set_pipeconf(intel_crtc);
 
        intel_crtc->active = true;
 
-       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_pll_enable)
@@ -4816,9 +5067,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        if (!is_dsi) {
                if (IS_CHERRYVIEW(dev))
-                       chv_enable_pll(intel_crtc);
+                       chv_enable_pll(intel_crtc, &intel_crtc->config);
                else
-                       vlv_enable_pll(intel_crtc);
+                       vlv_enable_pll(intel_crtc, &intel_crtc->config);
        }
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4835,10 +5086,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /* Underruns don't raise interrupts, so check manually. */
-       i9xx_check_fifo_underruns(dev);
+       i9xx_check_fifo_underruns(dev_priv);
 }
 
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4853,6 +5107,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -4874,7 +5129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        intel_crtc->active = true;
 
        if (!IS_GEN2(dev))
-               intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
@@ -4892,6 +5147,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /*
@@ -4902,10 +5160,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
         * but leave the pipe running.
         */
        if (IS_GEN2(dev))
-               intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        /* Underruns don't raise interrupts, so check manually. */
-       i9xx_check_fifo_underruns(dev);
+       i9xx_check_fifo_underruns(dev_priv);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4941,7 +5199,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
         * but leave the pipe running.
         */
        if (IS_GEN2(dev))
-               intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        /*
         * Vblank time updates from the shadow to live plane control register
@@ -4955,9 +5213,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_set_memory_cxsr(dev_priv, false);
        intel_crtc_disable_planes(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
-
        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
@@ -4966,6 +5221,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
         */
        intel_wait_for_vblank(dev, pipe);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->disable(encoder);
+
        intel_disable_pipe(intel_crtc);
 
        i9xx_pfit_disable(intel_crtc);
@@ -4974,7 +5235,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+       if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev))
@@ -4984,7 +5245,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        }
 
        if (!IS_GEN2(dev))
-               intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        intel_crtc->active = false;
        intel_update_watermarks(crtc);
@@ -4998,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
 {
 }
 
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
-                                   bool enabled)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_master_private *master_priv;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       if (!dev->primary->master)
-               return;
-
-       master_priv = dev->primary->master->driver_priv;
-       if (!master_priv->sarea_priv)
-               return;
-
-       switch (pipe) {
-       case 0:
-               master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
-               master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
-               break;
-       case 1:
-               master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
-               master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
-               break;
-       default:
-               DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
-               break;
-       }
-}
-
 /* Master function to enable/disable CRTC and corresponding power wells */
 void intel_crtc_control(struct drm_crtc *crtc, bool enable)
 {
@@ -5071,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
                enable |= intel_encoder->connectors_active;
 
        intel_crtc_control(crtc, enable);
-
-       intel_crtc_update_sarea(crtc, enable);
 }
 
 static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5087,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        WARN_ON(!crtc->enabled);
 
        dev_priv->display.crtc_disable(crtc);
-       intel_crtc_update_sarea(crtc, false);
        dev_priv->display.off(crtc);
 
        if (crtc->primary->fb) {
@@ -5326,11 +5554,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
        /* FIXME should check pixel clock limits on all platforms */
        if (INTEL_INFO(dev)->gen < 4) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
                int clock_limit =
                        dev_priv->display.get_display_clock_speed(dev);
 
@@ -5357,7 +5585,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
         * - LVDS dual channel mode
         * - Double wide pipe
         */
-       if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+       if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
             intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
                pipe_config->pipe_src_w &= ~1;
 
@@ -5379,13 +5607,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
        if (HAS_IPS(dev))
                hsw_compute_ips_config(crtc, pipe_config);
 
-       /*
-        * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
-        * old clock survives for now.
-        */
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
-               pipe_config->shared_dpll = crtc->config.shared_dpll;
-
        if (pipe_config->has_pch_encoder)
                return ironlake_fdi_compute_config(crtc, pipe_config);
 
@@ -5395,7 +5616,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 static int valleyview_get_display_clock_speed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int vco = valleyview_get_vco(dev_priv);
        u32 val;
        int divider;
 
@@ -5403,6 +5623,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
        if (IS_CHERRYVIEW(dev))
                return 400000;
 
+       if (dev_priv->hpll_freq == 0)
+               dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
        mutex_lock(&dev_priv->dpio_lock);
        val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
        mutex_unlock(&dev_priv->dpio_lock);
@@ -5413,7 +5636,7 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
             (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
             "cdclk change in progress\n");
 
-       return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
+       return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
 }
 
 static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -5545,15 +5768,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int refclk;
 
        if (IS_VALLEYVIEW(dev)) {
                refclk = 100000;
-       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+       } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
                refclk = dev_priv->vbt.lvds_ssc_freq;
                DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5583,24 +5806,24 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        u32 fp, fp2 = 0;
 
        if (IS_PINEVIEW(dev)) {
-               fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+               fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
                if (reduced_clock)
                        fp2 = pnv_dpll_compute_fp(reduced_clock);
        } else {
-               fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+               fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
                if (reduced_clock)
                        fp2 = i9xx_dpll_compute_fp(reduced_clock);
        }
 
-       crtc->config.dpll_hw_state.fp0 = fp;
+       crtc->new_config->dpll_hw_state.fp0 = fp;
 
        crtc->lowfreq_avail = false;
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
            reduced_clock && i915.powersave) {
-               crtc->config.dpll_hw_state.fp1 = fp2;
+               crtc->new_config->dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
        } else {
-               crtc->config.dpll_hw_state.fp1 = fp;
+               crtc->new_config->dpll_hw_state.fp1 = fp;
        }
 }
 
@@ -5689,7 +5912,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc)
                                                   &crtc->config.dp_m2_n2);
 }
 
-static void vlv_update_pll(struct intel_crtc *crtc)
+static void vlv_update_pll(struct intel_crtc *crtc,
+                          struct intel_crtc_config *pipe_config)
 {
        u32 dpll, dpll_md;
 
@@ -5704,14 +5928,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
        if (crtc->pipe == PIPE_B)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
        dpll |= DPLL_VCO_ENABLE;
-       crtc->config.dpll_hw_state.dpll = dpll;
+       pipe_config->dpll_hw_state.dpll = dpll;
 
-       dpll_md = (crtc->config.pixel_multiplier - 1)
+       dpll_md = (pipe_config->pixel_multiplier - 1)
                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-       crtc->config.dpll_hw_state.dpll_md = dpll_md;
+       pipe_config->dpll_hw_state.dpll_md = dpll_md;
 }
 
-static void vlv_prepare_pll(struct intel_crtc *crtc)
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5722,11 +5947,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
 
        mutex_lock(&dev_priv->dpio_lock);
 
-       bestn = crtc->config.dpll.n;
-       bestm1 = crtc->config.dpll.m1;
-       bestm2 = crtc->config.dpll.m2;
-       bestp1 = crtc->config.dpll.p1;
-       bestp2 = crtc->config.dpll.p2;
+       bestn = pipe_config->dpll.n;
+       bestm1 = pipe_config->dpll.m1;
+       bestm2 = pipe_config->dpll.m2;
+       bestp1 = pipe_config->dpll.p1;
+       bestp2 = pipe_config->dpll.p2;
 
        /* See eDP HDMI DPIO driver vbios notes doc */
 
@@ -5763,17 +5988,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
        /* Set HBR and RBR LPF coefficients */
-       if (crtc->config.port_clock == 162000 ||
-           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
-           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+       if (pipe_config->port_clock == 162000 ||
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
 
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
-           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+       if (crtc->config.has_dp_encoder) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5793,8 +6017,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
 
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -5802,19 +6026,21 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
-static void chv_update_pll(struct intel_crtc *crtc)
+static void chv_update_pll(struct intel_crtc *crtc,
+                          struct intel_crtc_config *pipe_config)
 {
-       crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
                DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
                DPLL_VCO_ENABLE;
        if (crtc->pipe != PIPE_A)
-               crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-       crtc->config.dpll_hw_state.dpll_md =
-               (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+       pipe_config->dpll_hw_state.dpll_md =
+               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 }
 
-static void chv_prepare_pll(struct intel_crtc *crtc)
+static void chv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5825,18 +6051,18 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        int refclk;
 
-       bestn = crtc->config.dpll.n;
-       bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
-       bestm1 = crtc->config.dpll.m1;
-       bestm2 = crtc->config.dpll.m2 >> 22;
-       bestp1 = crtc->config.dpll.p1;
-       bestp2 = crtc->config.dpll.p2;
+       bestn = pipe_config->dpll.n;
+       bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+       bestm1 = pipe_config->dpll.m1;
+       bestm2 = pipe_config->dpll.m2 >> 22;
+       bestp1 = pipe_config->dpll.p1;
+       bestp2 = pipe_config->dpll.p2;
 
        /*
         * Enable Refclk and SSC
         */
        I915_WRITE(dpll_reg,
-                  crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+                  pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
 
        mutex_lock(&dev_priv->dpio_lock);
 
@@ -5864,7 +6090,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
                       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
 
        /* Loop filter */
-       refclk = i9xx_get_refclk(&crtc->base, 0);
+       refclk = i9xx_get_refclk(crtc, 0);
        loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
                2 << DPIO_CHV_GAIN_CTRL_SHIFT;
        if (refclk == 100000)
@@ -5884,6 +6110,53 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+                     const struct dpll *dpll)
+{
+       struct intel_crtc *crtc =
+               to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+       struct intel_crtc_config pipe_config = {
+               .pixel_multiplier = 1,
+               .dpll = *dpll,
+       };
+
+       if (IS_CHERRYVIEW(dev)) {
+               chv_update_pll(crtc, &pipe_config);
+               chv_prepare_pll(crtc, &pipe_config);
+               chv_enable_pll(crtc, &pipe_config);
+       } else {
+               vlv_update_pll(crtc, &pipe_config);
+               vlv_prepare_pll(crtc, &pipe_config);
+               vlv_enable_pll(crtc, &pipe_config);
+       }
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. To be used in cases where the PLL was
+ * enabled via vlv_force_pll_on() without @pipe itself being enabled.
+ */
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+{
+       if (IS_CHERRYVIEW(dev))
+               chv_disable_pll(to_i915(dev), pipe);
+       else
+               vlv_disable_pll(to_i915(dev), pipe);
+}
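+
+/*
+ * Intended usage of the pair above (sketch; the callers live outside
+ * this hunk):
+ *
+ *     vlv_force_pll_on(dev, pipe, &dpll);
+ *     ...use the pipe's clock without enabling the pipe itself...
+ *     vlv_force_pll_off(dev, pipe);
+ */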
+
 static void i9xx_update_pll(struct intel_crtc *crtc,
                            intel_clock_t *reduced_clock,
                            int num_connectors)
@@ -5892,29 +6165,29 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpll;
        bool is_sdvo;
-       struct dpll *clock = &crtc->config.dpll;
+       struct dpll *clock = &crtc->new_config->dpll;
 
        i9xx_update_pll_dividers(crtc, reduced_clock);
 
-       is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
-               intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+       is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
+               intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
 
        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
-               dpll |= (crtc->config.pixel_multiplier - 1)
+               dpll |= (crtc->new_config->pixel_multiplier - 1)
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }
 
        if (is_sdvo)
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+       if (crtc->new_config->has_dp_encoder)
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -5942,21 +6215,21 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
        if (INTEL_INFO(dev)->gen >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
-       if (crtc->config.sdvo_tv_clock)
+       if (crtc->new_config->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+       else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       crtc->config.dpll_hw_state.dpll = dpll;
+       crtc->new_config->dpll_hw_state.dpll = dpll;
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+               u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-               crtc->config.dpll_hw_state.dpll_md = dpll_md;
+               crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
        }
 }
 
@@ -5967,13 +6240,13 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpll;
-       struct dpll *clock = &crtc->config.dpll;
+       struct dpll *clock = &crtc->new_config->dpll;
 
        i9xx_update_pll_dividers(crtc, reduced_clock);
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
@@ -5984,17 +6257,17 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
-       if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+       if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;
 
-       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+       if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       crtc->config.dpll_hw_state.dpll = dpll;
+       crtc->new_config->dpll_hw_state.dpll = dpll;
 }
 
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6018,7 +6291,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;
 
-               if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6176,7 +6449,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
        if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                if (INTEL_INFO(dev)->gen < 4 ||
-                   intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6190,13 +6463,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
        POSTING_READ(PIPECONF(intel_crtc->pipe));
 }
 
-static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
-                             int x, int y,
-                             struct drm_framebuffer *fb)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        bool ok, has_reduced_clock = false;
@@ -6204,7 +6474,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
+       for_each_intel_encoder(dev, encoder) {
+               if (encoder->new_crtc != crtc)
+                       continue;
+
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
@@ -6212,6 +6485,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                case INTEL_OUTPUT_DSI:
                        is_dsi = true;
                        break;
+               default:
+                       break;
                }
 
                num_connectors++;
@@ -6220,7 +6495,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        if (is_dsi)
                return 0;
 
-       if (!intel_crtc->config.clock_set) {
+       if (!crtc->new_config->clock_set) {
                refclk = i9xx_get_refclk(crtc, num_connectors);
 
                /*
@@ -6231,7 +6506,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                 */
                limit = intel_limit(crtc, refclk);
                ok = dev_priv->display.find_dpll(limit, crtc,
-                                                intel_crtc->config.port_clock,
+                                                crtc->new_config->port_clock,
                                                 refclk, NULL, &clock);
                if (!ok) {
                        DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6252,23 +6527,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                                                            &reduced_clock);
                }
                /* Compat-code for transition, will disappear. */
-               intel_crtc->config.dpll.n = clock.n;
-               intel_crtc->config.dpll.m1 = clock.m1;
-               intel_crtc->config.dpll.m2 = clock.m2;
-               intel_crtc->config.dpll.p1 = clock.p1;
-               intel_crtc->config.dpll.p2 = clock.p2;
+               crtc->new_config->dpll.n = clock.n;
+               crtc->new_config->dpll.m1 = clock.m1;
+               crtc->new_config->dpll.m2 = clock.m2;
+               crtc->new_config->dpll.p1 = clock.p1;
+               crtc->new_config->dpll.p2 = clock.p2;
        }
 
        if (IS_GEN2(dev)) {
-               i8xx_update_pll(intel_crtc,
+               i8xx_update_pll(crtc,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
        } else if (IS_CHERRYVIEW(dev)) {
-               chv_update_pll(intel_crtc);
+               chv_update_pll(crtc, crtc->new_config);
        } else if (IS_VALLEYVIEW(dev)) {
-               vlv_update_pll(intel_crtc);
+               vlv_update_pll(crtc, crtc->new_config);
        } else {
-               i9xx_update_pll(intel_crtc,
+               i9xx_update_pll(crtc,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
        }
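
The conversions from for_each_encoder_on_crtc() to for_each_intel_encoder() plus a new_crtc check matter because this code now runs before the new state is committed: encoder->base.crtc still points at the old pipe, while encoder->new_crtc holds the staged assignment. A reduced sketch of the filtering pattern (opaque stand-in types, not the driver's real structs):

/* Sketch: walk every encoder, keep only those staged for this CRTC. */
struct crtc;

struct encoder {
        struct encoder *next;
        struct crtc *new_crtc;          /* staged, not yet committed */
};

static int count_staged_encoders(const struct encoder *head,
                                 const struct crtc *crtc)
{
        const struct encoder *e;
        int n = 0;

        for (e = head; e; e = e->next) {
                if (e->new_crtc != crtc)
                        continue;       /* staged for another pipe */
                n++;
        }
        return n;
}
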
@@ -6434,8 +6709,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6540,6 +6815,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                        if (enc_to_dig_port(&encoder->base)->port == PORT_A)
                                has_cpu_edp = true;
                        break;
+               default:
+                       break;
                }
        }
 
@@ -6844,6 +7121,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
                case INTEL_OUTPUT_ANALOG:
                        has_vga = true;
                        break;
+               default:
+                       break;
                }
        }
 
@@ -6872,11 +7151,16 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
        int num_connectors = 0;
        bool is_lvds = false;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
+       for_each_intel_encoder(dev, encoder) {
+               if (encoder->new_crtc != to_intel_crtc(crtc))
+                       continue;
+
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
+               default:
+                       break;
                }
                num_connectors++;
        }
@@ -7021,7 +7305,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;
 
                switch (intel_crtc->config.pipe_bpp) {
@@ -7056,18 +7340,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int refclk;
        const intel_limit_t *limit;
        bool ret, is_lvds = false;
 
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-               switch (intel_encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
-               }
-       }
+       is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
 
        refclk = ironlake_get_refclk(crtc);
 
@@ -7076,9 +7354,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
         * refclk, or FALSE.  The returned values represent the clock equation:
         * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
-       limit = intel_limit(crtc, refclk);
-       ret = dev_priv->display.find_dpll(limit, crtc,
-                                         to_intel_crtc(crtc)->config.port_clock,
+       limit = intel_limit(intel_crtc, refclk);
+       ret = dev_priv->display.find_dpll(limit, intel_crtc,
+                                         intel_crtc->new_config->port_clock,
                                          refclk, NULL, clock);
        if (!ret)
                return false;
@@ -7091,7 +7369,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
                 * downclock feature.
                 */
                *has_reduced_clock =
-                       dev_priv->display.find_dpll(limit, crtc,
+                       dev_priv->display.find_dpll(limit, intel_crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk, clock,
                                                    reduced_clock);
@@ -7128,7 +7406,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        int factor, num_connectors = 0;
        bool is_lvds = false, is_sdvo = false;
 
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+       for_each_intel_encoder(dev, intel_encoder) {
+               if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+                       continue;
+
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
@@ -7137,6 +7418,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                case INTEL_OUTPUT_HDMI:
                        is_sdvo = true;
                        break;
+               default:
+                       break;
                }
 
                num_connectors++;
@@ -7149,10 +7432,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
                        factor = 25;
-       } else if (intel_crtc->config.sdvo_tv_clock)
+       } else if (intel_crtc->new_config->sdvo_tv_clock)
                factor = 20;
 
-       if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+       if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
                *fp |= FP_CB_TUNE;
 
        if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7165,20 +7448,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
 
-       dpll |= (intel_crtc->config.pixel_multiplier - 1)
+       dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
        if (is_sdvo)
                dpll |= DPLL_SDVO_HIGH_SPEED;
-       if (intel_crtc->config.has_dp_encoder)
+       if (intel_crtc->new_config->has_dp_encoder)
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
-       dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+       dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
-       dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+       dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-       switch (intel_crtc->config.dpll.p2) {
+       switch (intel_crtc->new_config->dpll.p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
@@ -7201,78 +7484,64 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        return dpll | DPLL_VCO_ENABLE;
 }
 
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-                                 int x, int y,
-                                 struct drm_framebuffer *fb)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int num_connectors = 0;
+       struct drm_device *dev = crtc->base.dev;
        intel_clock_t clock, reduced_clock;
        u32 dpll = 0, fp = 0, fp2 = 0;
        bool ok, has_reduced_clock = false;
        bool is_lvds = false;
-       struct intel_encoder *encoder;
        struct intel_shared_dpll *pll;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
-               }
-
-               num_connectors++;
-       }
+       is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
 
        WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
             "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
-       ok = ironlake_compute_clocks(crtc, &clock,
+       ok = ironlake_compute_clocks(&crtc->base, &clock,
                                     &has_reduced_clock, &reduced_clock);
-       if (!ok && !intel_crtc->config.clock_set) {
+       if (!ok && !crtc->new_config->clock_set) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }
        /* Compat-code for transition, will disappear. */
-       if (!intel_crtc->config.clock_set) {
-               intel_crtc->config.dpll.n = clock.n;
-               intel_crtc->config.dpll.m1 = clock.m1;
-               intel_crtc->config.dpll.m2 = clock.m2;
-               intel_crtc->config.dpll.p1 = clock.p1;
-               intel_crtc->config.dpll.p2 = clock.p2;
+       if (!crtc->new_config->clock_set) {
+               crtc->new_config->dpll.n = clock.n;
+               crtc->new_config->dpll.m1 = clock.m1;
+               crtc->new_config->dpll.m2 = clock.m2;
+               crtc->new_config->dpll.p1 = clock.p1;
+               crtc->new_config->dpll.p2 = clock.p2;
        }
 
        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
-       if (intel_crtc->config.has_pch_encoder) {
-               fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+       if (crtc->new_config->has_pch_encoder) {
+               fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
                if (has_reduced_clock)
                        fp2 = i9xx_dpll_compute_fp(&reduced_clock);
 
-               dpll = ironlake_compute_dpll(intel_crtc,
+               dpll = ironlake_compute_dpll(crtc,
                                             &fp, &reduced_clock,
                                             has_reduced_clock ? &fp2 : NULL);
 
-               intel_crtc->config.dpll_hw_state.dpll = dpll;
-               intel_crtc->config.dpll_hw_state.fp0 = fp;
+               crtc->new_config->dpll_hw_state.dpll = dpll;
+               crtc->new_config->dpll_hw_state.fp0 = fp;
                if (has_reduced_clock)
-                       intel_crtc->config.dpll_hw_state.fp1 = fp2;
+                       crtc->new_config->dpll_hw_state.fp1 = fp2;
                else
-                       intel_crtc->config.dpll_hw_state.fp1 = fp;
+                       crtc->new_config->dpll_hw_state.fp1 = fp;
 
-               pll = intel_get_shared_dpll(intel_crtc);
+               pll = intel_get_shared_dpll(crtc);
                if (pll == NULL) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
-                                        pipe_name(intel_crtc->pipe));
+                                        pipe_name(crtc->pipe));
                        return -EINVAL;
                }
-       } else
-               intel_put_shared_dpll(intel_crtc);
+       }
 
        if (is_lvds && has_reduced_clock && i915.powersave)
-               intel_crtc->lowfreq_avail = true;
+               crtc->lowfreq_avail = true;
        else
-               intel_crtc->lowfreq_avail = false;
+               crtc->lowfreq_avail = false;
 
        return 0;
 }
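
The fp and fp2 values above pack the divider triple into one register word. Judging from the name and use of i9xx_dpll_compute_fp() here, the layout is N in bits 16 and up, M1 in bits 8..15 and M2 in bits 0..7; a standalone sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Pack an (n, m1, m2) divider triple into an FP register word --
 * layout assumed from i9xx_dpll_compute_fp(), not taken from bspec. */
static uint32_t compute_fp(uint32_t n, uint32_t m1, uint32_t m2)
{
        return n << 16 | m1 << 8 | m2;
}

int main(void)
{
        printf("FP0 = 0x%08x\n", (unsigned)compute_fp(2, 14, 8));
        return 0;
}
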
@@ -7353,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                     &pipe_config->fdi_m_n, NULL);
 }
 
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = I915_READ(PS_CTL(crtc->pipe));
+
+       if (tmp & PS_ENABLE) {
+               pipe_config->pch_pfit.enabled = true;
+               pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
+               pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+       }
+}
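
skylake_get_pfit_config() reads the scaler window back as two packed registers. Assuming the same two-halves layout the ironlake panel fitter uses (x/width in the high 16 bits, y/height in the low 16), the readback decodes as in this standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Decode a packed pfit/scaler window: POS = x << 16 | y, SZ = w << 16 | h
 * (layout assumed from the ironlake pfit convention). */
static void decode_pfit_window(uint32_t pos, uint32_t size)
{
        printf("x=%u y=%u w=%u h=%u\n",
               (unsigned)(pos >> 16), (unsigned)(pos & 0xffff),
               (unsigned)(size >> 16), (unsigned)(size & 0xffff));
}

int main(void)
{
        decode_pfit_window(0, (1920u << 16) | 1080u);
        return 0;
}
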
+
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_config *pipe_config)
 {
@@ -7444,8 +7729,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7638,7 +7923,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
        uint32_t val;
-       unsigned long irqflags;
 
        val = I915_READ(LCPLL_CTL);
 
@@ -7658,10 +7942,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
         * to call special forcewake code that doesn't touch runtime PM and
         * doesn't enable the forcewake delayed work.
         */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7692,10 +7976,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        }
 
        /* See the big comment above. */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
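
Several hunks in this patch (here, and again in intel_crtc_destroy() and the page-flip paths below) replace spin_lock_irqsave() with spin_lock_irq(). The two differ only in what happens on unlock: _irqrestore puts back whatever interrupt state the caller had, while _irq unconditionally re-enables, so the latter is only correct where interrupts are known to be on, as in these process-context paths. A kernel-style sketch of the contrast (illustrative, not standalone-runnable):

#include <linux/spinlock.h>

/* Safe from any context: caller's irq state is saved and restored. */
static void update_any_context(spinlock_t *lock, int *val)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        (*val)++;
        spin_unlock_irqrestore(lock, flags);
}

/* Process context only: unlock re-enables interrupts unconditionally. */
static void update_process_context(spinlock_t *lock, int *val)
{
        spin_lock_irq(lock);
        (*val)++;
        spin_unlock_irq(lock);
}
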
 
 /*
@@ -7757,28 +8041,36 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
        intel_prepare_ddi(dev);
 }
 
-static void snb_modeset_global_resources(struct drm_device *dev)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
 {
-       modeset_update_crtc_power_domains(dev);
-}
+       if (!intel_ddi_pll_select(crtc))
+               return -EINVAL;
 
-static void haswell_modeset_global_resources(struct drm_device *dev)
-{
-       modeset_update_crtc_power_domains(dev);
+       crtc->lowfreq_avail = false;
+
+       return 0;
 }
 
-static int haswell_crtc_mode_set(struct drm_crtc *crtc,
-                                int x, int y,
-                                struct drm_framebuffer *fb)
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_config *pipe_config)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       if (!intel_ddi_pll_select(intel_crtc))
-               return -EINVAL;
+       u32 temp;
 
-       intel_crtc->lowfreq_avail = false;
+       temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+       pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
 
-       return 0;
+       switch (pipe_config->ddi_pll_sel) {
+       case SKL_DPLL1:
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+               break;
+       case SKL_DPLL2:
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+               break;
+       case SKL_DPLL3:
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+               break;
+       }
 }
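
skylake_get_ddi_pll() recovers which shared DPLL feeds a port from DPLL_CTRL2, where each DDI owns a 3-bit slot whose 2-bit clock-select starts at bit port * 3 + 1 (the mask macro is assumed to expand to match that use). Standalone decode sketch:

#include <stdio.h>
#include <stdint.h>

/* Assumed expansion of DPLL_CTRL2_DDI_CLK_SEL_MASK(port). */
#define DDI_CLK_SEL_MASK(port) (3u << ((port) * 3 + 1))

static unsigned int decode_ddi_pll(uint32_t dpll_ctrl2, int port)
{
        return (dpll_ctrl2 & DDI_CLK_SEL_MASK(port)) >> (port * 3 + 1);
}

int main(void)
{
        uint32_t reg = 2u << (1 * 3 + 1);       /* port B wired to DPLL2 */

        printf("port B -> DPLL%u\n", decode_ddi_pll(reg, 1));
        return 0;
}
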
 
 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -7810,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-       haswell_get_ddi_pll(dev_priv, port, pipe_config);
+       if (IS_SKYLAKE(dev))
+               skylake_get_ddi_pll(dev_priv, port, pipe_config);
+       else
+               haswell_get_ddi_pll(dev_priv, port, pipe_config);
 
        if (pipe_config->shared_dpll >= 0) {
                pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7824,7 +8119,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
-       if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+       if (INTEL_INFO(dev)->gen < 9 &&
+           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;
 
                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7843,7 +8139,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                         POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
@@ -7872,7 +8168,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;
 
@@ -7885,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
 
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       if (intel_display_power_enabled(dev_priv, pfit_domain))
-               ironlake_get_pfit_config(crtc, pipe_config);
+       if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+               if (IS_SKYLAKE(dev))
+                       skylake_get_pfit_config(crtc, pipe_config);
+               else
+                       ironlake_get_pfit_config(crtc, pipe_config);
+       }
 
        if (IS_HASWELL(dev))
                pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -7902,315 +8202,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        return true;
 }
 
-static struct {
-       int clock;
-       u32 config;
-} hdmi_audio_clock[] = {
-       { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
-       { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
-       { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
-       { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
-       { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
-       { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
-       { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
-       { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
-       { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
-       { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-};
-
-/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
-               if (mode->clock == hdmi_audio_clock[i].clock)
-                       break;
-       }
-
-       if (i == ARRAY_SIZE(hdmi_audio_clock)) {
-               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
-               i = 1;
-       }
-
-       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
-                     hdmi_audio_clock[i].clock,
-                     hdmi_audio_clock[i].config);
-
-       return hdmi_audio_clock[i].config;
-}
-
-static bool intel_eld_uptodate(struct drm_connector *connector,
-                              int reg_eldv, uint32_t bits_eldv,
-                              int reg_elda, uint32_t bits_elda,
-                              int reg_edid)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       uint8_t *eld = connector->eld;
-       uint32_t i;
-
-       i = I915_READ(reg_eldv);
-       i &= bits_eldv;
-
-       if (!eld[0])
-               return !i;
-
-       if (!i)
-               return false;
-
-       i = I915_READ(reg_elda);
-       i &= ~bits_elda;
-       I915_WRITE(reg_elda, i);
-
-       for (i = 0; i < eld[2]; i++)
-               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
-                       return false;
-
-       return true;
-}
-
-static void g4x_write_eld(struct drm_connector *connector,
-                         struct drm_crtc *crtc,
-                         struct drm_display_mode *mode)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t len;
-       uint32_t i;
-
-       i = I915_READ(G4X_AUD_VID_DID);
-
-       if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
-               eldv = G4X_ELDV_DEVCL_DEVBLC;
-       else
-               eldv = G4X_ELDV_DEVCTG;
-
-       if (intel_eld_uptodate(connector,
-                              G4X_AUD_CNTL_ST, eldv,
-                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
-                              G4X_HDMIW_HDMIEDID))
-               return;
-
-       i = I915_READ(G4X_AUD_CNTL_ST);
-       i &= ~(eldv | G4X_ELD_ADDR);
-       len = (i >> 9) & 0x1f;          /* ELD buffer size */
-       I915_WRITE(G4X_AUD_CNTL_ST, i);
-
-       if (!eld[0])
-               return;
-
-       len = min_t(uint8_t, eld[2], len);
-       DRM_DEBUG_DRIVER("ELD size %d\n", len);
-       for (i = 0; i < len; i++)
-               I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
-       i = I915_READ(G4X_AUD_CNTL_ST);
-       i |= eldv;
-       I915_WRITE(G4X_AUD_CNTL_ST, i);
-}
-
-static void haswell_write_eld(struct drm_connector *connector,
-                             struct drm_crtc *crtc,
-                             struct drm_display_mode *mode)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t i;
-       int len;
-       int pipe = to_intel_crtc(crtc)->pipe;
-       int tmp;
-
-       int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
-       int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
-       int aud_config = HSW_AUD_CFG(pipe);
-       int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
-       /* Audio output enable */
-       DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
-       tmp = I915_READ(aud_cntrl_st2);
-       tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
-       I915_WRITE(aud_cntrl_st2, tmp);
-       POSTING_READ(aud_cntrl_st2);
-
-       assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-
-       /* Set ELD valid state */
-       tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
-       tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
-       I915_WRITE(aud_cntrl_st2, tmp);
-       tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
-
-       /* Enable HDMI mode */
-       tmp = I915_READ(aud_config);
-       DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
-       /* clear N_programing_enable and N_value_index */
-       tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
-       I915_WRITE(aud_config, tmp);
-
-       DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
-       eldv = AUDIO_ELD_VALID_A << (pipe * 4);
-
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-               I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-       } else {
-               I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-       }
-
-       if (intel_eld_uptodate(connector,
-                              aud_cntrl_st2, eldv,
-                              aud_cntl_st, IBX_ELD_ADDRESS,
-                              hdmiw_hdmiedid))
-               return;
-
-       i = I915_READ(aud_cntrl_st2);
-       i &= ~eldv;
-       I915_WRITE(aud_cntrl_st2, i);
-
-       if (!eld[0])
-               return;
-
-       i = I915_READ(aud_cntl_st);
-       i &= ~IBX_ELD_ADDRESS;
-       I915_WRITE(aud_cntl_st, i);
-       i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
-       DRM_DEBUG_DRIVER("port num:%d\n", i);
-
-       len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
-       DRM_DEBUG_DRIVER("ELD size %d\n", len);
-       for (i = 0; i < len; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
-       i = I915_READ(aud_cntrl_st2);
-       i |= eldv;
-       I915_WRITE(aud_cntrl_st2, i);
-
-}
-
-static void ironlake_write_eld(struct drm_connector *connector,
-                              struct drm_crtc *crtc,
-                              struct drm_display_mode *mode)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t i;
-       int len;
-       int hdmiw_hdmiedid;
-       int aud_config;
-       int aud_cntl_st;
-       int aud_cntrl_st2;
-       int pipe = to_intel_crtc(crtc)->pipe;
-
-       if (HAS_PCH_IBX(connector->dev)) {
-               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
-               aud_config = IBX_AUD_CFG(pipe);
-               aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-       } else if (IS_VALLEYVIEW(connector->dev)) {
-               hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
-               aud_config = VLV_AUD_CFG(pipe);
-               aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
-       } else {
-               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
-               aud_config = CPT_AUD_CFG(pipe);
-               aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
-       }
-
-       DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
-       if (IS_VALLEYVIEW(connector->dev))  {
-               struct intel_encoder *intel_encoder;
-               struct intel_digital_port *intel_dig_port;
-
-               intel_encoder = intel_attached_encoder(connector);
-               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-               i = intel_dig_port->port;
-       } else {
-               i = I915_READ(aud_cntl_st);
-               i = (i >> 29) & DIP_PORT_SEL_MASK;
-               /* DIP_Port_Select, 0x1 = PortB */
-       }
-
-       if (!i) {
-               DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
-               /* operate blindly on all ports */
-               eldv = IBX_ELD_VALIDB;
-               eldv |= IBX_ELD_VALIDB << 4;
-               eldv |= IBX_ELD_VALIDB << 8;
-       } else {
-               DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
-               eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
-       }
-
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-               I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-       } else {
-               I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-       }
-
-       if (intel_eld_uptodate(connector,
-                              aud_cntrl_st2, eldv,
-                              aud_cntl_st, IBX_ELD_ADDRESS,
-                              hdmiw_hdmiedid))
-               return;
-
-       i = I915_READ(aud_cntrl_st2);
-       i &= ~eldv;
-       I915_WRITE(aud_cntrl_st2, i);
-
-       if (!eld[0])
-               return;
-
-       i = I915_READ(aud_cntl_st);
-       i &= ~IBX_ELD_ADDRESS;
-       I915_WRITE(aud_cntl_st, i);
-
-       len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
-       DRM_DEBUG_DRIVER("ELD size %d\n", len);
-       for (i = 0; i < len; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
-       i = I915_READ(aud_cntrl_st2);
-       i |= eldv;
-       I915_WRITE(aud_cntrl_st2, i);
-}
-
-void intel_write_eld(struct drm_encoder *encoder,
-                    struct drm_display_mode *mode)
-{
-       struct drm_crtc *crtc = encoder->crtc;
-       struct drm_connector *connector;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       connector = drm_select_eld(encoder, mode);
-       if (!connector)
-               return;
-
-       DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-                        connector->base.id,
-                        connector->name,
-                        connector->encoder->base.id,
-                        connector->encoder->name);
-
-       connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
-       if (dev_priv->display.write_eld)
-               dev_priv->display.write_eld(connector, crtc, mode);
-}
-
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8255,8 +8247,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
                intel_crtc->cursor_cntl = 0;
        }
 
-       if (intel_crtc->cursor_base != base)
+       if (intel_crtc->cursor_base != base) {
                I915_WRITE(_CURABASE, base);
+               intel_crtc->cursor_base = base;
+       }
 
        if (intel_crtc->cursor_size != size) {
                I915_WRITE(CURSIZE, size);
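
Moving the cursor_base assignment inside the comparison means the cached value only advances when the register is actually written, keeping the shadow copy and the hardware in lockstep. A minimal sketch of the write-caching pattern:

#include <stdint.h>

/* Touch the (slow) register only when the value changes, and update the
 * shadow copy at the same point; write_reg is any MMIO write callback. */
struct cursor_shadow {
        uint32_t base;
};

static void set_cursor_base(struct cursor_shadow *s, uint32_t base,
                            void (*write_reg)(uint32_t))
{
        if (s->base == base)
                return;                 /* hardware already holds this */

        write_reg(base);
        s->base = base;
}
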
@@ -8296,9 +8290,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
                                return;
                }
                cntl |= pipe << 28; /* Connect to correct pipe */
+
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       cntl |= CURSOR_PIPE_CSC_ENABLE;
        }
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+       if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+               cntl |= CURSOR_ROTATE_180;
 
        if (intel_crtc->cursor_cntl != cntl) {
                I915_WRITE(CURCNTR(pipe), cntl);
@@ -8309,6 +8307,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
        /* and commit changes on next vblank */
        I915_WRITE(CURBASE(pipe), base);
        POSTING_READ(CURBASE(pipe));
+
+       intel_crtc->cursor_base = base;
 }
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8355,11 +8355,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
        I915_WRITE(CURPOS(pipe), pos);
 
+       /* ILK+ do this automagically */
+       if (HAS_GMCH_DISPLAY(dev) &&
+               to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+               base += (intel_crtc->cursor_height *
+                       intel_crtc->cursor_width - 1) * 4;
+       }
+
        if (IS_845G(dev) || IS_I865G(dev))
                i845_update_cursor(crtc, base);
        else
                i9xx_update_cursor(crtc, base);
-       intel_crtc->cursor_base = base;
 }
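
On the GMCH platforms the 180 degree cursor rotation is done by pointing the base address at the last pixel, so the hardware's backwards walk reproduces the rotated image; the * 4 corresponds to the 4-byte-per-pixel ARGB cursor format. Standalone sketch of the offset math:

#include <stdio.h>
#include <stdint.h>

/* Base address of the last pixel of a w x h, 4-byte-per-pixel cursor. */
static uint32_t rotated_cursor_base(uint32_t base, uint32_t w, uint32_t h)
{
        return base + (h * w - 1) * 4;
}

int main(void)
{
        /* 64x64 cursor at an illustrative GTT offset */
        printf("0x%08x\n", (unsigned)rotated_cursor_base(0x100000, 64, 64));
        return 0;
}
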
 
 static bool cursor_size_ok(struct drm_device *dev,
@@ -8399,22 +8405,15 @@ static bool cursor_size_ok(struct drm_device *dev,
        return true;
 }
 
-/*
- * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
- *
- * Note that the object's reference will be consumed if the update fails.  If
- * the update succeeds, the reference of the old object (if any) will be
- * consumed.
- */
 static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
                                     struct drm_i915_gem_object *obj,
                                     uint32_t width, uint32_t height)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
-       unsigned old_width, stride;
+       unsigned old_width;
        uint32_t addr;
        int ret;
 
@@ -8426,30 +8425,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
                goto finish;
        }
 
-       /* Check for which cursor types we support */
-       if (!cursor_size_ok(dev, width, height)) {
-               DRM_DEBUG("Cursor dimension not supported\n");
-               return -EINVAL;
-       }
-
-       stride = roundup_pow_of_two(width) * 4;
-       if (obj->base.size < stride * height) {
-               DRM_DEBUG_KMS("buffer is too small\n");
-               ret = -ENOMEM;
-               goto fail;
-       }
-
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
        if (!INTEL_INFO(dev)->cursor_needs_physical) {
                unsigned alignment;
 
-               if (obj->tiling_mode) {
-                       DRM_DEBUG_KMS("cursor cannot be tiled\n");
-                       ret = -EINVAL;
-                       goto fail_locked;
-               }
-
                /*
                 * Global gtt pte registers are special registers which actually
                 * forward writes to a chunk of system memory. Which means that
@@ -8516,17 +8496,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
                if (old_width != width)
                        intel_update_watermarks(crtc);
                intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-       }
 
-       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+       }
 
        return 0;
 fail_unpin:
        i915_gem_object_unpin_from_display_plane(obj);
 fail_locked:
        mutex_unlock(&dev->struct_mutex);
-fail:
-       drm_gem_object_unreference_unlocked(&obj->base);
        return ret;
 }
 
@@ -8561,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev,
 
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
-               drm_gem_object_unreference_unlocked(&obj->base);
+               drm_gem_object_unreference(&obj->base);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -8571,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev,
 
        return &intel_fb->base;
 err:
-       drm_gem_object_unreference_unlocked(&obj->base);
+       drm_gem_object_unreference(&obj->base);
        kfree(intel_fb);
 
        return ERR_PTR(ret);
@@ -8702,6 +8680,9 @@ retry:
                crtc = encoder->crtc;
 
                ret = drm_modeset_lock(&crtc->mutex, ctx);
+               if (ret)
+                       goto fail_unlock;
+               ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
                if (ret)
                        goto fail_unlock;
 
@@ -8739,6 +8720,9 @@ retry:
        }
 
        ret = drm_modeset_lock(&crtc->mutex, ctx);
+       if (ret)
+               goto fail_unlock;
+       ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
        if (ret)
                goto fail_unlock;
        intel_encoder->new_crtc = to_intel_crtc(crtc);
@@ -9023,35 +9007,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int dpll_reg = DPLL(pipe);
-       int dpll;
-
-       if (!HAS_GMCH_DISPLAY(dev))
-               return;
-
-       if (!dev_priv->lvds_downclock_avail)
-               return;
-
-       dpll = I915_READ(dpll_reg);
-       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-               DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-               assert_panel_unlocked(dev_priv, pipe);
-
-               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-               I915_WRITE(dpll_reg, dpll);
-               intel_wait_for_vblank(dev, pipe);
-
-               dpll = I915_READ(dpll_reg);
-               if (dpll & DISPLAY_RATE_SELECT_FPA1)
-                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-       }
-}
-
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -9127,199 +9082,16 @@ out:
        intel_runtime_pm_put(dev_priv);
 }
 
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-                              unsigned frontbuffer_bits,
-                              struct intel_engine_cs *ring)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-
-       if (!i915.powersave)
-               return;
-
-       for_each_pipe(dev_priv, pipe) {
-               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-                       continue;
-
-               intel_increase_pllclock(dev, pipe);
-               if (ring && intel_fbc_enabled(dev))
-                       ring->fbc_dirty = true;
-       }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       if (ring) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits
-                       |= obj->frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits
-                       &= ~obj->frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
-       intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some outstanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* Delay flushing when rings are still busy. */
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
-       intel_edp_psr_flush(dev, frontbuffer_bits);
-
-       /*
-        * FIXME: Unconditional fbc flushing here is a rather gross hack and
-        * needs to be reworked into a proper frontbuffer tracking scheme like
-        * psr employs.
-        */
-       if (IS_BROADWELL(dev))
-               gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned frontbuffer_bits;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       frontbuffer_bits = obj->frontbuffer_bits;
-
-       if (retire) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               /* Filter out new bits since rendering started. */
-               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
-               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-                                   unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       dev_priv->fb_tracking.flip_bits
-               |= frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-                                    unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       /* Mask any cancelled flips. */
-       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct intel_unpin_work *work;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        work = intel_crtc->unpin_work;
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (work) {
                cancel_work_sync(&work->work);
@@ -9365,6 +9137,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        if (intel_crtc == NULL)
                return;
 
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
 
@@ -9408,6 +9184,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+           crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+               return true;
+
        /*
         * The relevant registers don't exist on pre-ctg.
         * As the flip done interrupt doesn't trigger for mmio
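
The new check at the top of page_flip_finished() handles GPU resets: a flip queued before a reset will never see its completion interrupt, so a changed reset counter (or a reset in flight) counts as done. Reduced standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* A flip queued before a reset can never signal completion: treat any
 * reset activity since queuing as "finished". */
struct flip {
        uint32_t reset_counter;         /* sampled when the flip was queued */
};

static bool finished_due_to_reset(const struct flip *f,
                                  uint32_t current_counter,
                                  bool reset_in_progress)
{
        return reset_in_progress || f->reset_counter != current_counter;
}
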
@@ -9446,7 +9226,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
-       /* NB: An MMIO update of the plane base pointer will also
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        *
+        * NB: An MMIO update of the plane base pointer will also
         * generate a page-flip completion irq, i.e. every modeset
         * is also accompanied by a spurious intel_prepare_page_flip().
         */
@@ -9736,115 +9521,128 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
+       bool atomic_update;
+       u32 start_vbl_count;
        u32 dspcntr;
        u32 reg;
 
        intel_mark_page_flip_active(intel_crtc);
 
+       atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
        reg = DSPCNTR(intel_crtc->plane);
        dspcntr = I915_READ(reg);
 
-       if (INTEL_INFO(dev)->gen >= 4) {
-               if (obj->tiling_mode != I915_TILING_NONE)
-                       dspcntr |= DISPPLANE_TILED;
-               else
-                       dspcntr &= ~DISPPLANE_TILED;
-       }
+       if (obj->tiling_mode != I915_TILING_NONE)
+               dspcntr |= DISPPLANE_TILED;
+       else
+               dspcntr &= ~DISPPLANE_TILED;
+
        I915_WRITE(reg, dspcntr);
 
        I915_WRITE(DSPSURF(intel_crtc->plane),
                   intel_crtc->unpin_work->gtt_offset);
        POSTING_READ(DSPSURF(intel_crtc->plane));
+
+       if (atomic_update)
+               intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+static void intel_mmio_flip_work_func(struct work_struct *work)
 {
+       struct intel_crtc *intel_crtc =
+               container_of(work, struct intel_crtc, mmio_flip.work);
        struct intel_engine_cs *ring;
-       int ret;
-
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-       if (!obj->last_write_seqno)
-               return 0;
-
-       ring = obj->ring;
-
-       if (i915_seqno_passed(ring->get_seqno(ring, true),
-                             obj->last_write_seqno))
-               return 0;
+       uint32_t seqno;
 
-       ret = i915_gem_check_olr(ring, obj->last_write_seqno);
-       if (ret)
-               return ret;
+       seqno = intel_crtc->mmio_flip.seqno;
+       ring = intel_crtc->mmio_flip.ring;
 
-       if (WARN_ON(!ring->irq_get(ring)))
-               return 0;
+       if (seqno)
+               WARN_ON(__i915_wait_seqno(ring, seqno,
+                                         intel_crtc->reset_counter,
+                                         false, NULL, NULL) != 0);
 
-       return 1;
+       intel_do_mmio_flip(intel_crtc);
 }
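
The ring-notification machinery that used to complete MMIO flips from the interrupt handler is replaced by this worker: queue time records (ring, seqno), the work item sleeps until rendering passes that seqno, and the flip registers are then written from process context. A reduced kernel-style sketch, where wait_for_rendering() is a hypothetical stand-in for the __i915_wait_seqno() call above:

#include <linux/workqueue.h>
#include <linux/types.h>

struct intel_engine_cs;

struct mmio_flip_state {
        struct work_struct work;
        struct intel_engine_cs *ring;
        u32 seqno;                      /* last write seqno of the fb object */
};

/* Hypothetical stand-in for __i915_wait_seqno(). */
void wait_for_rendering(struct intel_engine_cs *ring, u32 seqno);

static void mmio_flip_work(struct work_struct *work)
{
        struct mmio_flip_state *flip =
                container_of(work, struct mmio_flip_state, work);

        if (flip->seqno)
                wait_for_rendering(flip->ring, flip->seqno);

        /* rendering done: safe to program DSPSURF from process context */
}
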
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+static int intel_queue_mmio_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj,
+                                struct intel_engine_cs *ring,
+                                uint32_t flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       struct intel_crtc *intel_crtc;
-       unsigned long irq_flags;
-       u32 seqno;
-
-       seqno = ring->get_seqno(ring, false);
-
-       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
-       for_each_intel_crtc(ring->dev, intel_crtc) {
-               struct intel_mmio_flip *mmio_flip;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-               mmio_flip = &intel_crtc->mmio_flip;
-               if (mmio_flip->seqno == 0)
-                       continue;
+       intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+       intel_crtc->mmio_flip.ring = obj->ring;
 
-               if (ring->id != mmio_flip->ring_id)
-                       continue;
+       schedule_work(&intel_crtc->mmio_flip.work);
 
-               if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
-                       intel_do_mmio_flip(intel_crtc);
-                       mmio_flip->seqno = 0;
-                       ring->irq_put(ring);
-               }
-       }
-       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+       return 0;
 }
 
-static int intel_queue_mmio_flip(struct drm_device *dev,
+static int intel_gen9_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long irq_flags;
+       uint32_t plane = 0, stride;
        int ret;
 
-       if (WARN_ON(intel_crtc->mmio_flip.seqno))
-               return -EBUSY;
+       switch (intel_crtc->pipe) {
+       case PIPE_A:
+               plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
+               break;
+       case PIPE_B:
+               plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
+               break;
+       case PIPE_C:
+               plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
+               break;
+       default:
+               WARN_ONCE(1, "unknown plane in flip command\n");
+               return -ENODEV;
+       }
 
-       ret = intel_postpone_flip(obj);
-       if (ret < 0)
-               return ret;
-       if (ret == 0) {
-               intel_do_mmio_flip(intel_crtc);
-               return 0;
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               WARN_ONCE(1, "unknown tiling in flip command\n");
+               return -ENODEV;
        }
 
-       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
-       intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
-       intel_crtc->mmio_flip.ring_id = obj->ring->id;
-       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+       ret = intel_ring_begin(ring, 10);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+       intel_ring_emit(ring, DERRMR);
+       intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+                               DERRMR_PIPEB_PRI_FLIP_DONE |
+                               DERRMR_PIPEC_PRI_FLIP_DONE));
+       intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+                             MI_SRM_LRM_GLOBAL_GTT);
+       intel_ring_emit(ring, DERRMR);
+       intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+       intel_ring_emit(ring, 0);
+
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
+       intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
+       intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+       intel_mark_page_flip_active(intel_crtc);
+       __intel_ring_advance(ring);
 
-       /*
-        * Double check to catch cases where irq fired before
-        * mmio flip data was ready
-        */
-       intel_notify_mmio_flip(obj->ring);
        return 0;
 }
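The shifts above encode the flip command's stride field, which counts 64-byte units for linear scanout and 512-byte tiles for X-tiled scanout. Illustrative helper (not part of the patch):

static inline uint32_t skl_flip_stride(uint32_t pitch_bytes, bool x_tiled)
{
        /* linear: 64-byte units (>> 6); X-tiled: 512-byte units (>> 9) */
        return x_tiled ? pitch_bytes >> 9 : pitch_bytes >> 6;
}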
 
@@ -9903,18 +9701,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
+
+       WARN_ON(!in_irq());
 
        if (crtc == NULL)
                return;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock(&dev->event_lock);
        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
        }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock(&dev->event_lock);
 }
 
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9930,7 +9729,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
-       unsigned long flags;
        int ret;
 
        /*
@@ -9971,7 +9769,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto free_work;
 
        /* We borrow the event spin lock for protecting unpin_work */
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
@@ -9981,7 +9779,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
 
                        drm_crtc_vblank_put(crtc);
                        kfree(work);
@@ -9989,7 +9787,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                }
        }
        intel_crtc->unpin_work = work;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);
@@ -10027,7 +9825,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                ring = &dev_priv->ring[RCS];
        }
 
-       ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+       ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
        if (ret)
                goto cleanup_pending;
 
@@ -10076,9 +9874,9 @@ cleanup_pending:
        mutex_unlock(&dev->struct_mutex);
 
 cleanup:
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        drm_crtc_vblank_put(crtc);
 free_work:
@@ -10089,9 +9887,9 @@ out_hang:
                intel_crtc_wait_for_pending_flips(crtc);
                ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
                if (ret == 0 && event) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
+                       spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
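These conversions drop the irqsave variant because intel_crtc_page_flip() only runs in process context with interrupts enabled; saving the flags is needed only when the caller's interrupt state is unknown. The two idioms side by side:

unsigned long flags;

spin_lock_irqsave(&dev->event_lock, flags);     /* safe from any context */
spin_unlock_irqrestore(&dev->event_lock, flags);

spin_lock_irq(&dev->event_lock);                /* irqs known enabled */
spin_unlock_irq(&dev->event_lock);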
@@ -10287,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->dp_m2_n2.link_n,
                      pipe_config->dp_m2_n2.tu);
 
+       DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
+                     pipe_config->has_audio,
+                     pipe_config->has_infoframe);
+
        DRM_DEBUG_KMS("requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->requested_mode);
        DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10348,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
        return true;
 }
 
+static bool check_digital_port_conflicts(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+       unsigned int used_ports = 0;
+
+       /*
+        * Walk the connector list instead of the encoder
+        * list to detect the problem on ddi platforms
+        * where there's just one encoder per digital port.
+        */
+       list_for_each_entry(connector,
+                           &dev->mode_config.connector_list, base.head) {
+               struct intel_encoder *encoder = connector->new_encoder;
+
+               if (!encoder)
+                       continue;
+
+               WARN_ON(!encoder->new_crtc);
+
+               switch (encoder->type) {
+                       unsigned int port_mask;
+               case INTEL_OUTPUT_UNKNOWN:
+                       if (WARN_ON(!HAS_DDI(dev)))
+                               break;
+               case INTEL_OUTPUT_DISPLAYPORT:
+               case INTEL_OUTPUT_HDMI:
+               case INTEL_OUTPUT_EDP:
+                       port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+                       /* the same port mustn't appear more than once */
+                       if (used_ports & port_mask)
+                               return false;
+
+                       used_ports |= port_mask;
+               default:
+                       break;
+               }
+       }
+
+       return true;
+}
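The conflict test reduces to claiming one bit per digital port; a self-contained sketch of the same check:

static bool claim_port(unsigned int *used_ports, unsigned int port)
{
        unsigned int mask = 1u << port;

        if (*used_ports & mask)
                return false;   /* port already driven by another connector */
        *used_ports |= mask;
        return true;
}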
+
 static struct intel_crtc_config *
 intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
@@ -10364,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
                return ERR_PTR(-EINVAL);
        }
 
+       if (!check_digital_port_conflicts(dev)) {
+               DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
        if (!pipe_config)
                return ERR_PTR(-ENOMEM);
@@ -10569,10 +10418,13 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
 static void
 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_crtc *intel_crtc;
        struct drm_connector *connector;
 
+       intel_shared_dpll_commit(dev_priv);
+
        for_each_intel_encoder(dev, intel_encoder) {
                if (!intel_encoder->base.crtc)
                        continue;
@@ -10752,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
            IS_VALLEYVIEW(dev))
                PIPE_CONF_CHECK_I(limited_color_range);
+       PIPE_CONF_CHECK_I(has_infoframe);
 
        PIPE_CONF_CHECK_I(has_audio);
 
@@ -10808,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
 
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10825,6 +10681,56 @@ intel_pipe_config_compare(struct drm_device *dev,
        return true;
 }
 
+static void check_wm_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct skl_ddb_allocation hw_ddb, *sw_ddb;
+       struct intel_crtc *intel_crtc;
+       int plane;
+
+       if (INTEL_INFO(dev)->gen < 9)
+               return;
+
+       skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+       sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+       for_each_intel_crtc(dev, intel_crtc) {
+               struct skl_ddb_entry *hw_entry, *sw_entry;
+               const enum pipe pipe = intel_crtc->pipe;
+
+               if (!intel_crtc->active)
+                       continue;
+
+               /* planes */
+               for_each_plane(pipe, plane) {
+                       hw_entry = &hw_ddb.plane[pipe][plane];
+                       sw_entry = &sw_ddb->plane[pipe][plane];
+
+                       if (skl_ddb_entry_equal(hw_entry, sw_entry))
+                               continue;
+
+                       DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+                                 "(expected (%u,%u), found (%u,%u))\n",
+                                 pipe_name(pipe), plane + 1,
+                                 sw_entry->start, sw_entry->end,
+                                 hw_entry->start, hw_entry->end);
+               }
+
+               /* cursor */
+               hw_entry = &hw_ddb.cursor[pipe];
+               sw_entry = &sw_ddb->cursor[pipe];
+
+               if (skl_ddb_entry_equal(hw_entry, sw_entry))
+                       continue;
+
+               DRM_ERROR("mismatch in DDB state pipe %c cursor "
+                         "(expected (%u,%u), found (%u,%u))\n",
+                         pipe_name(pipe),
+                         sw_entry->start, sw_entry->end,
+                         hw_entry->start, hw_entry->end);
+       }
+}
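Each DDB allocation is a contiguous [start, end) slice of the display buffer; the check flags any plane or cursor whose hardware slice differs from the software one. Sketch, assuming skl_ddb_entry is a pair of 16-bit bounds:

struct ddb_entry_sketch { uint16_t start, end; };

static bool ddb_entry_matches(const struct ddb_entry_sketch *hw,
                              const struct ddb_entry_sketch *sw)
{
        return hw->start == sw->start && hw->end == sw->end;
}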
+
 static void
 check_connector_state(struct drm_device *dev)
 {
@@ -10991,9 +10897,9 @@ check_shared_dpll_state(struct drm_device *dev)
 
                active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
-               WARN(pll->active > pll->refcount,
+               WARN(pll->active > hweight32(pll->config.crtc_mask),
                     "more active pll users than references: %i vs %i\n",
-                    pll->active, pll->refcount);
+                    pll->active, hweight32(pll->config.crtc_mask));
                WARN(pll->active && !pll->on,
                     "pll in active use but not on in sw tracking\n");
                WARN(pll->on && !pll->active,
@@ -11011,11 +10917,11 @@ check_shared_dpll_state(struct drm_device *dev)
                WARN(pll->active != active_crtcs,
                     "pll active crtcs mismatch (expected %i, found %i)\n",
                     pll->active, active_crtcs);
-               WARN(pll->refcount != enabled_crtcs,
+               WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
                     "pll enabled crtcs mismatch (expected %i, found %i)\n",
-                    pll->refcount, enabled_crtcs);
+                    hweight32(pll->config.crtc_mask), enabled_crtcs);
 
-               WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+               WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
                                       sizeof(dpll_hw_state)),
                     "pll hw state mismatch\n");
        }
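With pll->refcount gone, the user count is derived on demand from the crtc bitmask, so the two bookkeeping values can no longer drift apart:

/* bit N set in crtc_mask means pipe N uses this PLL; the number of
 * users is simply the population count */
unsigned int users = hweight32(pll->config.crtc_mask);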
@@ -11024,6 +10930,7 @@ check_shared_dpll_state(struct drm_device *dev)
 void
 intel_modeset_check_state(struct drm_device *dev)
 {
+       check_wm_state(dev);
        check_connector_state(dev);
        check_encoder_state(dev);
        check_crtc_state(dev);
@@ -11074,50 +10981,67 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev) &&
-                  intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+                  intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else
                crtc->scanline_offset = 1;
 }
 
+static struct intel_crtc_config *
+intel_modeset_compute_config(struct drm_crtc *crtc,
+                            struct drm_display_mode *mode,
+                            struct drm_framebuffer *fb,
+                            unsigned *modeset_pipes,
+                            unsigned *prepare_pipes,
+                            unsigned *disable_pipes)
+{
+       struct intel_crtc_config *pipe_config = NULL;
+
+       intel_modeset_affected_pipes(crtc, modeset_pipes,
+                                    prepare_pipes, disable_pipes);
+
+       if (*modeset_pipes == 0)
+               goto out;
+
+       /*
+        * Note this needs changes when we start tracking multiple modes
+        * and crtcs.  At that point we'll need to compute the whole config
+        * (i.e. one pipe_config for each crtc) rather than just the one
+        * for this crtc.
+        */
+       pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+       if (IS_ERR(pipe_config))
+               goto out;
+       intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+                              "[modeset]");
+
+out:
+       return pipe_config;
+}
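As I read it, intel_modeset_compute_config() has three outcomes: an ERR_PTR() on failure, NULL when no pipe needs a full modeset, and otherwise a pipe_config the caller must pass on or free. A hedged sketch of the caller side:

pipe_config = intel_modeset_compute_config(crtc, mode, fb,
                                           &modeset_pipes,
                                           &prepare_pipes,
                                           &disable_pipes);
if (IS_ERR(pipe_config))
        return PTR_ERR(pipe_config);    /* hard failure */
if (!pipe_config)
        goto fb_only_update;            /* hypothetical label: no full modeset */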
+
 static int __intel_set_mode(struct drm_crtc *crtc,
                            struct drm_display_mode *mode,
-                           int x, int y, struct drm_framebuffer *fb)
+                           int x, int y, struct drm_framebuffer *fb,
+                           struct intel_crtc_config *pipe_config,
+                           unsigned modeset_pipes,
+                           unsigned prepare_pipes,
+                           unsigned disable_pipes)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *saved_mode;
-       struct intel_crtc_config *pipe_config = NULL;
        struct intel_crtc *intel_crtc;
-       unsigned disable_pipes, prepare_pipes, modeset_pipes;
        int ret = 0;
 
        saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
        if (!saved_mode)
                return -ENOMEM;
 
-       intel_modeset_affected_pipes(crtc, &modeset_pipes,
-                                    &prepare_pipes, &disable_pipes);
-
        *saved_mode = crtc->mode;
 
-       /* Hack: Because we don't (yet) support global modeset on multiple
-        * crtcs, we don't keep track of the new mode for more than one crtc.
-        * Hence simply check whether any bit is set in modeset_pipes in all the
-        * pieces of code that are not yet converted to deal with mutliple crtcs
-        * changing their mode at the same time. */
-       if (modeset_pipes) {
-               pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
-               if (IS_ERR(pipe_config)) {
-                       ret = PTR_ERR(pipe_config);
-                       pipe_config = NULL;
-
-                       goto out;
-               }
-               intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
-                                      "[modeset]");
+       if (modeset_pipes)
                to_intel_crtc(crtc)->new_config = pipe_config;
-       }
 
        /*
         * See if the config requires any additional preparation, e.g.
@@ -11133,6 +11057,22 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                prepare_pipes &= ~disable_pipes;
        }
 
+       if (dev_priv->display.crtc_compute_clock) {
+               unsigned clear_pipes = modeset_pipes | disable_pipes;
+
+               ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+               if (ret)
+                       goto done;
+
+               for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+                       ret = dev_priv->display.crtc_compute_clock(intel_crtc);
+                       if (ret) {
+                               intel_shared_dpll_abort_config(dev_priv);
+                               goto done;
+                       }
+               }
+       }
+
        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
                intel_crtc_disable(&intel_crtc->base);
 
@@ -11143,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 
        /* crtc->mode is already used by the ->mode_set callbacks, hence we need
         * to set it here already despite that we pass it down the callchain.
+        *
+        * Note we'll need to fix this up when we start tracking multiple
+        * pipes; here we assume a single modeset_pipe and only track the
+        * single crtc and mode.
         */
        if (modeset_pipes) {
                crtc->mode = *mode;
@@ -11164,8 +11108,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
         * update the output configuration. */
        intel_modeset_update_state(dev, prepare_pipes);
 
-       if (dev_priv->display.modeset_global_resources)
-               dev_priv->display.modeset_global_resources(dev);
+       modeset_update_crtc_power_domains(dev);
 
        /* Set up the DPLL and any encoders state that needs to adjust or depend
         * on the DPLL.
@@ -11176,9 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
                mutex_lock(&dev->struct_mutex);
-               ret = intel_pin_and_fence_fb_obj(dev,
-                                                obj,
-                                                NULL);
+               ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
                if (ret != 0) {
                        DRM_ERROR("pin & fence failed\n");
                        mutex_unlock(&dev->struct_mutex);
@@ -11193,11 +11134,6 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                crtc->primary->fb = fb;
                crtc->x = x;
                crtc->y = y;
-
-               ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
-                                                     x, y, fb);
-               if (ret)
-                       goto done;
        }
 
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11212,19 +11148,23 @@ done:
        if (ret && crtc->enabled)
                crtc->mode = *saved_mode;
 
-out:
        kfree(pipe_config);
        kfree(saved_mode);
        return ret;
 }
 
-static int intel_set_mode(struct drm_crtc *crtc,
-                         struct drm_display_mode *mode,
-                         int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode_pipes(struct drm_crtc *crtc,
+                               struct drm_display_mode *mode,
+                               int x, int y, struct drm_framebuffer *fb,
+                               struct intel_crtc_config *pipe_config,
+                               unsigned modeset_pipes,
+                               unsigned prepare_pipes,
+                               unsigned disable_pipes)
 {
        int ret;
 
-       ret = __intel_set_mode(crtc, mode, x, y, fb);
+       ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
+                              prepare_pipes, disable_pipes);
 
        if (ret == 0)
                intel_modeset_check_state(crtc->dev);
@@ -11232,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
        return ret;
 }
 
+static int intel_set_mode(struct drm_crtc *crtc,
+                         struct drm_display_mode *mode,
+                         int x, int y, struct drm_framebuffer *fb)
+{
+       struct intel_crtc_config *pipe_config;
+       unsigned modeset_pipes, prepare_pipes, disable_pipes;
+
+       pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+                                                  &modeset_pipes,
+                                                  &prepare_pipes,
+                                                  &disable_pipes);
+
+       if (IS_ERR(pipe_config))
+               return PTR_ERR(pipe_config);
+
+       return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+                                   modeset_pipes, prepare_pipes,
+                                   disable_pipes);
+}
+
 void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
        intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11560,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
        struct drm_device *dev;
        struct drm_mode_set save_set;
        struct intel_set_config *config;
+       struct intel_crtc_config *pipe_config;
+       unsigned modeset_pipes, prepare_pipes, disable_pipes;
        int ret;
 
        BUG_ON(!set);
@@ -11605,9 +11567,36 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
        if (ret)
                goto fail;
 
+       pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
+                                                  set->fb,
+                                                  &modeset_pipes,
+                                                  &prepare_pipes,
+                                                  &disable_pipes);
+       if (IS_ERR(pipe_config)) {
+               ret = PTR_ERR(pipe_config);
+               goto fail;
+       } else if (pipe_config) {
+               if (pipe_config->has_audio !=
+                   to_intel_crtc(set->crtc)->config.has_audio)
+                       config->mode_changed = true;
+
+               /* Force mode sets for any infoframe stuff */
+               if (pipe_config->has_infoframe ||
+                   to_intel_crtc(set->crtc)->config.has_infoframe)
+                       config->mode_changed = true;
+       }
+
+       /* set_mode will free it in the mode_changed case */
+       if (!config->mode_changed)
+               kfree(pipe_config);
+
+       intel_update_pipe_size(to_intel_crtc(set->crtc));
+
        if (config->mode_changed) {
-               ret = intel_set_mode(set->crtc, set->mode,
-                                    set->x, set->y, set->fb);
+               ret = intel_set_mode_pipes(set->crtc, set->mode,
+                                          set->x, set->y, set->fb, pipe_config,
+                                          modeset_pipes, prepare_pipes,
+                                          disable_pipes);
        } else if (config->fb_changed) {
                struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
 
@@ -11677,7 +11666,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(PCH_DPLL(pll->id));
@@ -11691,8 +11680,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll *pll)
 {
-       I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
-       I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+       I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+       I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
 }
 
 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
@@ -11701,7 +11690,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
        /* PCH refclock must be enabled first */
        ibx_assert_pch_refclk_enabled(dev_priv);
 
-       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+       I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
 
        /* Wait for the clocks to stabilize. */
        POSTING_READ(PCH_DPLL(pll->id));
@@ -11712,7 +11701,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
         *
         * So write it again.
         */
-       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+       I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
        POSTING_READ(PCH_DPLL(pll->id));
        udelay(200);
 }
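The double write with POSTING_READ() in between is the standard posted-write flush: the read back forces the MMIO write out of the write buffer before the settle delay starts counting. The idiom in isolation:

I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));        /* flush the posted write */
udelay(150);                            /* then let the PLL settle */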
@@ -11811,161 +11800,195 @@ disable_unpin:
 }
 
 static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                            unsigned int crtc_w, unsigned int crtc_h,
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
+intel_check_primary_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *dest = &state->dst;
+       struct drm_rect *src = &state->src;
+       const struct drm_rect *clip = &state->clip;
+
+       return drm_plane_helper_check_update(plane, crtc, fb,
+                                            src, dest, clip,
+                                            DRM_PLANE_HELPER_NO_SCALING,
+                                            DRM_PLANE_HELPER_NO_SCALING,
+                                            false, true, &state->visible);
+}
+
+static int
+intel_prepare_primary_plane(struct drm_plane *plane,
+                           struct intel_plane_state *state)
 {
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-       struct drm_rect dest = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .y1 = crtc_y,
-               .x2 = crtc_x + crtc_w,
-               .y2 = crtc_y + crtc_h,
-       };
-       struct drm_rect src = {
-               /* 16.16 fixed point */
-               .x1 = src_x,
-               .y1 = src_y,
-               .x2 = src_x + src_w,
-               .y2 = src_y + src_h,
-       };
-       const struct drm_rect clip = {
-               /* integer pixels */
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       const struct {
-               int crtc_x, crtc_y;
-               unsigned int crtc_w, crtc_h;
-               uint32_t src_x, src_y, src_w, src_h;
-       } orig = {
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-       };
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       bool visible;
        int ret;
 
-       ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           &src, &dest, &clip,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           false, true, &visible);
+       intel_crtc_wait_for_pending_flips(crtc);
 
-       if (ret)
-               return ret;
+       if (intel_crtc_has_pending_flip(crtc)) {
+               DRM_ERROR("pipe is still busy with an old pageflip\n");
+               return -EBUSY;
+       }
 
-       /*
-        * If the CRTC isn't enabled, we're just pinning the framebuffer,
-        * updating the fb pointer, and returning without touching the
-        * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
-        * turn on the display with all planes setup as desired.
-        */
-       if (!crtc->enabled) {
+       if (old_obj != obj) {
                mutex_lock(&dev->struct_mutex);
-
-               /*
-                * If we already called setplane while the crtc was disabled,
-                * we may have an fb pinned; unpin it.
-                */
-               if (plane->fb)
-                       intel_unpin_fb_obj(old_obj);
-
-               i915_gem_track_fb(old_obj, obj,
-                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
-               /* Pin and return without programming hardware */
-               ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+               ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+               if (ret == 0)
+                       i915_gem_track_fb(old_obj, obj,
+                                         INTEL_FRONTBUFFER_PRIMARY(pipe));
                mutex_unlock(&dev->struct_mutex);
-
-               return ret;
+               if (ret != 0) {
+                       DRM_DEBUG_KMS("pin & fence failed\n");
+                       return ret;
+               }
        }
 
-       intel_crtc_wait_for_pending_flips(crtc);
+       return 0;
+}
 
-       /*
-        * If clipping results in a non-visible primary plane, we'll disable
-        * the primary plane.  Note that this is a bit different than what
-        * happens if userspace explicitly disables the plane by passing fb=0
-        * because plane->fb still gets set and pinned.
-        */
-       if (!visible) {
-               mutex_lock(&dev->struct_mutex);
+static void
+intel_commit_primary_plane(struct drm_plane *plane,
+                          struct intel_plane_state *state)
+{
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       struct drm_framebuffer *old_fb = plane->fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_rect *src = &state->src;
+
+       crtc->primary->fb = fb;
+       crtc->x = src->x1 >> 16;
+       crtc->y = src->y1 >> 16;
+
+       intel_plane->crtc_x = state->orig_dst.x1;
+       intel_plane->crtc_y = state->orig_dst.y1;
+       intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+       intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+       intel_plane->src_x = state->orig_src.x1;
+       intel_plane->src_y = state->orig_src.y1;
+       intel_plane->src_w = drm_rect_width(&state->orig_src);
+       intel_plane->src_h = drm_rect_height(&state->orig_src);
+       intel_plane->obj = obj;
 
+       if (intel_crtc->active) {
                /*
-                * Try to pin the new fb first so that we can bail out if we
-                * fail.
+                * FBC does not work on some platforms for rotated
+                * planes, so disable it when rotation is not 0 and
+                * update it when rotation is set back to 0.
+                *
+                * FIXME: This is redundant with the fbc update done in
+                * the primary plane enable function except that that
+                * one is done too late. We eventually need to unify
+                * this.
                 */
-               if (plane->fb != fb) {
-                       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-                       if (ret) {
-                               mutex_unlock(&dev->struct_mutex);
-                               return ret;
-                       }
+               if (intel_crtc->primary_enabled &&
+                   INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+                   dev_priv->fbc.plane == intel_crtc->plane &&
+                   intel_plane->rotation != BIT(DRM_ROTATE_0)) {
+                       intel_disable_fbc(dev);
                }
 
-               i915_gem_track_fb(old_obj, obj,
-                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
-               if (intel_crtc->primary_enabled)
-                       intel_disable_primary_hw_plane(plane, crtc);
+               if (state->visible) {
+                       bool was_enabled = intel_crtc->primary_enabled;
 
+                       /* FIXME: kill this fastboot hack */
+                       intel_update_pipe_size(intel_crtc);
 
-               if (plane->fb != fb)
-                       if (plane->fb)
-                               intel_unpin_fb_obj(old_obj);
+                       intel_crtc->primary_enabled = true;
 
-               mutex_unlock(&dev->struct_mutex);
+                       dev_priv->display.update_primary_plane(crtc, plane->fb,
+                                       crtc->x, crtc->y);
 
-       } else {
-               if (intel_crtc && intel_crtc->active &&
-                   intel_crtc->primary_enabled) {
                        /*
-                        * FBC does not work on some platforms for rotated
-                        * planes, so disable it when rotation is not 0 and
-                        * update it when rotation is set back to 0.
-                        *
-                        * FIXME: This is redundant with the fbc update done in
-                        * the primary plane enable function except that that
-                        * one is done too late. We eventually need to unify
-                        * this.
+                        * BDW signals flip done immediately if the plane
+                        * is disabled, even if the plane enable is already
+                        * armed to occur at the next vblank :(
                         */
-                       if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-                           dev_priv->fbc.plane == intel_crtc->plane &&
-                           intel_plane->rotation != BIT(DRM_ROTATE_0)) {
-                               intel_disable_fbc(dev);
-                       }
+                       if (IS_BROADWELL(dev) && !was_enabled)
+                               intel_wait_for_vblank(dev, intel_crtc->pipe);
+               } else {
+                       /*
+                        * If clipping results in a non-visible primary plane,
+                        * we'll disable the primary plane.  Note that this is
+                        * a bit different than what happens if userspace
+                        * explicitly disables the plane by passing fb=0
+                        * because plane->fb still gets set and pinned.
+                        */
+                       intel_disable_primary_hw_plane(plane, crtc);
                }
-               ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
-               if (ret)
-                       return ret;
 
-               if (!intel_crtc->primary_enabled)
-                       intel_enable_primary_hw_plane(plane, crtc);
+               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
+               mutex_lock(&dev->struct_mutex);
+               intel_update_fbc(dev);
+               mutex_unlock(&dev->struct_mutex);
        }
 
-       intel_plane->crtc_x = orig.crtc_x;
-       intel_plane->crtc_y = orig.crtc_y;
-       intel_plane->crtc_w = orig.crtc_w;
-       intel_plane->crtc_h = orig.crtc_h;
-       intel_plane->src_x = orig.src_x;
-       intel_plane->src_y = orig.src_y;
-       intel_plane->src_w = orig.src_w;
-       intel_plane->src_h = orig.src_h;
-       intel_plane->obj = obj;
+       if (old_fb && old_fb != fb) {
+               if (intel_crtc->active)
+                       intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+               mutex_lock(&dev->struct_mutex);
+               intel_unpin_fb_obj(old_obj);
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                            unsigned int crtc_w, unsigned int crtc_h,
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       struct intel_plane_state state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int ret;
+
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_primary_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       ret = intel_prepare_primary_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       intel_commit_primary_plane(plane, &state);
 
        return 0;
 }
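intel_primary_plane_setplane() is now just glue around a check/prepare/commit split, the shape atomic modesetting expects: check validates without side effects, prepare may fail while pinning buffers, commit must not fail. Generic sketch (check/prepare/commit are placeholders for the intel_*_primary_plane() helpers above):

ret = check(plane, &state);     /* validate only, no hw access */
if (ret)
        return ret;
ret = prepare(plane, &state);   /* pin/fence buffers, may fail */
if (ret)
        return ret;
commit(plane, &state);          /* point of no return */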
@@ -12044,51 +12067,92 @@ intel_cursor_plane_disable(struct drm_plane *plane)
 }
 
 static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                         unsigned int crtc_w, unsigned int crtc_h,
-                         uint32_t src_x, uint32_t src_y,
-                         uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+                        struct intel_plane_state *state)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
-       struct drm_rect dest = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .y1 = crtc_y,
-               .x2 = crtc_x + crtc_w,
-               .y2 = crtc_y + crtc_h,
-       };
-       struct drm_rect src = {
-               /* 16.16 fixed point */
-               .x1 = src_x,
-               .y1 = src_y,
-               .x2 = src_x + src_w,
-               .y2 = src_y + src_h,
-       };
-       const struct drm_rect clip = {
-               /* integer pixels */
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       bool visible;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *dest = &state->dst;
+       struct drm_rect *src = &state->src;
+       const struct drm_rect *clip = &state->clip;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       int crtc_w, crtc_h;
+       unsigned stride;
        int ret;
 
        ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           &src, &dest, &clip,
+                                           src, dest, clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
-                                           true, true, &visible);
+                                           true, true, &state->visible);
        if (ret)
                return ret;
 
-       crtc->cursor_x = crtc_x;
-       crtc->cursor_y = crtc_y;
+       /* if we want to turn off the cursor, ignore width and height */
+       if (!obj)
+               return 0;
+
+       /* Check for which cursor types we support */
+       crtc_w = drm_rect_width(&state->orig_dst);
+       crtc_h = drm_rect_height(&state->orig_dst);
+       if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
+               DRM_DEBUG("Cursor dimension not supported\n");
+               return -EINVAL;
+       }
+
+       stride = roundup_pow_of_two(crtc_w) * 4;
+       if (obj->base.size < stride * crtc_h) {
+               DRM_DEBUG_KMS("buffer is too small\n");
+               return -ENOMEM;
+       }
+
+       if (fb == crtc->cursor->fb)
+               return 0;
+
+       /* we only need to pin inside GTT if cursor is non-phy */
+       mutex_lock(&dev->struct_mutex);
+       if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+               DRM_DEBUG_KMS("cursor cannot be tiled\n");
+               ret = -EINVAL;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
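The stride math assumes a 32bpp cursor with a power-of-two pitch; a self-contained sketch of the bound being enforced (cursor_bo_big_enough() is illustrative, not driver code):

static bool cursor_bo_big_enough(size_t bo_size,
                                 unsigned int w, unsigned int h)
{
        size_t stride = (size_t)roundup_pow_of_two(w) * 4; /* ARGB8888 */

        return bo_size >= stride * h;
}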
+
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       int crtc_w, crtc_h;
+
+       crtc->cursor_x = state->orig_dst.x1;
+       crtc->cursor_y = state->orig_dst.y1;
+
+       intel_plane->crtc_x = state->orig_dst.x1;
+       intel_plane->crtc_y = state->orig_dst.y1;
+       intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+       intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+       intel_plane->src_x = state->orig_src.x1;
+       intel_plane->src_y = state->orig_src.y1;
+       intel_plane->src_w = drm_rect_width(&state->orig_src);
+       intel_plane->src_h = drm_rect_height(&state->orig_src);
+       intel_plane->obj = obj;
+
        if (fb != crtc->cursor->fb) {
+               crtc_w = drm_rect_width(&state->orig_dst);
+               crtc_h = drm_rect_height(&state->orig_dst);
                return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
        } else {
-               intel_crtc_update_cursor(crtc, visible);
+               intel_crtc_update_cursor(crtc, state->visible);
 
                intel_frontbuffer_flip(crtc->dev,
                                       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12096,10 +12160,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                return 0;
        }
 }
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                         unsigned int crtc_w, unsigned int crtc_h,
+                         uint32_t src_x, uint32_t src_y,
+                         uint32_t src_w, uint32_t src_h)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane_state state;
+       int ret;
+
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_cursor_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       return intel_commit_cursor_plane(plane, &state);
+}
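Both plane-update wrappers pack source coordinates in 16.16 fixed point while destination coordinates stay integer pixels; the conversions are plain shifts, as in intel_commit_primary_plane()'s crtc->x = src->x1 >> 16. Sketch:

static inline uint32_t px_to_fixed16(int px)  { return (uint32_t)px << 16; }
static inline int fixed16_to_px(uint32_t v)   { return (int)(v >> 16); }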
+
 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_cursor_plane_update,
        .disable_plane = intel_cursor_plane_disable,
        .destroy = intel_plane_destroy,
+       .set_property = intel_plane_set_property,
 };
 
 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -12115,12 +12222,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
        cursor->max_downscale = 1;
        cursor->pipe = pipe;
        cursor->plane = pipe;
+       cursor->rotation = BIT(DRM_ROTATE_0);
 
        drm_universal_plane_init(dev, &cursor->base, 0,
                                 &intel_cursor_plane_funcs,
                                 intel_cursor_formats,
                                 ARRAY_SIZE(intel_cursor_formats),
                                 DRM_PLANE_TYPE_CURSOR);
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               if (!dev->mode_config.rotation_property)
+                       dev->mode_config.rotation_property =
+                               drm_mode_create_rotation_property(dev,
+                                                       BIT(DRM_ROTATE_0) |
+                                                       BIT(DRM_ROTATE_180));
+               if (dev->mode_config.rotation_property)
+                       drm_object_attach_property(&cursor->base.base,
+                               dev->mode_config.rotation_property,
+                               cursor->rotation);
+       }
+
        return &cursor->base;
 }
 
@@ -12176,6 +12297,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
+       INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
+
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -12196,7 +12319,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
 
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-       if (!encoder)
+       if (!encoder || WARN_ON(!encoder->crtc))
                return INVALID_PIPE;
 
        return to_intel_crtc(encoder->crtc)->pipe;
@@ -12284,7 +12407,10 @@ static bool intel_crt_present(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_ULT(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               return false;
+
+       if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
                return false;
 
        if (IS_CHERRYVIEW(dev))
@@ -12428,7 +12554,7 @@ static void intel_setup_outputs(struct drm_device *dev)
        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);
 
-       intel_edp_psr_init(dev);
+       intel_psr_init(dev);
 
        for_each_intel_encoder(dev, encoder) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12632,16 +12758,22 @@ static void intel_init_display(struct drm_device *dev)
        if (HAS_DDI(dev)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_plane_config = ironlake_get_plane_config;
-               dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+               dev_priv->display.crtc_compute_clock =
+                       haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       dev_priv->display.update_primary_plane =
+                               skylake_update_primary_plane;
+               else
+                       dev_priv->display.update_primary_plane =
+                               ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_plane_config = ironlake_get_plane_config;
-               dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+               dev_priv->display.crtc_compute_clock =
+                       ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
@@ -12650,7 +12782,7 @@ static void intel_init_display(struct drm_device *dev)
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_plane_config = i9xx_get_plane_config;
-               dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+               dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
                dev_priv->display.off = i9xx_crtc_off;
@@ -12659,7 +12791,7 @@ static void intel_init_display(struct drm_device *dev)
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_plane_config = i9xx_get_plane_config;
-               dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+               dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
                dev_priv->display.off = i9xx_crtc_off;
@@ -12696,31 +12828,20 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.get_display_clock_speed =
                        i830_get_display_clock_speed;
 
-       if (IS_G4X(dev)) {
-               dev_priv->display.write_eld = g4x_write_eld;
-       } else if (IS_GEN5(dev)) {
+       if (IS_GEN5(dev)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-               dev_priv->display.write_eld = ironlake_write_eld;
        } else if (IS_GEN6(dev)) {
                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-               dev_priv->display.write_eld = ironlake_write_eld;
-               dev_priv->display.modeset_global_resources =
-                       snb_modeset_global_resources;
        } else if (IS_IVYBRIDGE(dev)) {
                /* FIXME: detect B0+ stepping and use auto training */
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-               dev_priv->display.write_eld = ironlake_write_eld;
                dev_priv->display.modeset_global_resources =
                        ivb_modeset_global_resources;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-               dev_priv->display.write_eld = haswell_write_eld;
-               dev_priv->display.modeset_global_resources =
-                       haswell_modeset_global_resources;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.modeset_global_resources =
                        valleyview_modeset_global_resources;
-               dev_priv->display.write_eld = ironlake_write_eld;
        }
 
        /* Default just returns -ENODEV to indicate unsupported */
@@ -12747,6 +12868,9 @@ static void intel_init_display(struct drm_device *dev)
        case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
+       case 9:
+               dev_priv->display.queue_flip = intel_gen9_queue_flip;
+               break;
        }
 
        intel_panel_init_backlight_funcs(dev);
@@ -12951,11 +13075,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
        intel_enable_gt_powersave(dev);
 }
 
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
-       intel_suspend_hw(dev);
-}
-
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12981,6 +13100,7 @@ void intel_modeset_init(struct drm_device *dev)
                return;
 
        intel_init_display(dev);
+       intel_init_audio(dev);
 
        if (IS_GEN2(dev)) {
                dev->mode_config.max_width = 2048;
@@ -13291,7 +13411,7 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
 
        i915_redisable_vga_power_on(dev);
@@ -13335,18 +13455,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
-               pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+               pll->on = pll->get_hw_state(dev_priv, pll,
+                                           &pll->config.hw_state);
                pll->active = 0;
+               pll->config.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
-                       if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+                       if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
                                pll->active++;
+                               pll->config.crtc_mask |= 1 << crtc->pipe;
+                       }
                }
-               pll->refcount = pll->active;
 
-               DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
-                             pll->name, pll->refcount, pll->on);
+               DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+                             pll->name, pll->config.crtc_mask, pll->on);
 
-               if (pll->refcount)
+               if (pll->config.crtc_mask)
                        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
        }
 
@@ -13436,7 +13559,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                pll->on = false;
        }
 
-       if (HAS_PCH_SPLIT(dev))
+       if (IS_GEN9(dev))
+               skl_wm_get_hw_state(dev);
+       else if (HAS_PCH_SPLIT(dev))
                ilk_wm_get_hw_state(dev);
 
        if (force_restore) {
@@ -13450,8 +13575,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                        struct drm_crtc *crtc =
                                dev_priv->pipe_to_crtc_mapping[pipe];
 
-                       __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
-                                        crtc->primary->fb);
+                       intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+                                      crtc->primary->fb);
                }
        } else {
                intel_modeset_update_staged_output_state(dev);
@@ -13462,6 +13587,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
 
@@ -13469,6 +13595,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
        intel_init_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);
 
+       /*
+        * There may be no VBT; and if the BIOS enabled SSC we can
+        * just keep using it to avoid unnecessary flicker.  Whereas if the
+        * BIOS isn't using it, don't assume it will work even if the VBT
+        * indicates as much.
+        */
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+                                               DREF_SSC1_ENABLE);
+
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev);
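[Editor's note] The added block trusts the SSC enable bit the BIOS actually programmed into PCH_DREF_CONTROL over whatever the VBT claims. A hedged standalone model of that decision (the bit position below is illustrative; the real one lives in i915_reg.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define DREF_SSC1_ENABLE (1u << 1)   /* illustrative position only */

    /* Prefer the hardware state the firmware left behind over the
     * (possibly absent or stale) VBT table. */
    static bool lvds_use_ssc(uint32_t dref_control, bool vbt_says_ssc)
    {
        (void)vbt_says_ssc;              /* deliberately ignored */
        return (dref_control & DREF_SSC1_ENABLE) != 0;
    }

    int main(void)
    {
        return lvds_use_ssc(DREF_SSC1_ENABLE, false) ? 0 : 1;
    }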
@@ -13484,7 +13620,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
                if (obj == NULL)
                        continue;
 
-               if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
+               if (intel_pin_and_fence_fb_obj(c->primary,
+                                              c->primary->fb,
+                                              NULL)) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
@@ -13492,6 +13630,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
                }
        }
        mutex_unlock(&dev->struct_mutex);
+
+       intel_backlight_register(dev);
 }
 
 void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13507,14 +13647,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;
 
+       intel_disable_gt_powersave(dev);
+
+       intel_backlight_unregister(dev);
+
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
-        * Too much stuff here (turning of rps, connectors, ...) would
+        * Too much stuff here (turning off connectors, ...) would
         * experience fancy races otherwise.
         */
-       drm_irq_uninstall(dev);
-       intel_hpd_cancel_work(dev_priv);
-       dev_priv->pm._irqs_disabled = true;
+       intel_irq_uninstall(dev_priv);
 
        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13528,8 +13670,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_disable_fbc(dev);
 
-       intel_disable_gt_powersave(dev);
-
        ironlake_teardown_rc6(dev);
 
        mutex_unlock(&dev->struct_mutex);
@@ -13669,8 +13809,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
-                                                          POWER_DOMAIN_PIPE(i));
+                       __intel_display_power_is_enabled(dev_priv,
+                                                        POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -13705,7 +13845,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
+                       __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
@@ -13789,9 +13929,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
 
        for_each_intel_crtc(dev, crtc) {
                struct intel_unpin_work *work;
-               unsigned long irqflags;
 
-               spin_lock_irqsave(&dev->event_lock, irqflags);
+               spin_lock_irq(&dev->event_lock);
 
                work = crtc->unpin_work;
 
@@ -13801,6 +13940,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
                        work->event = NULL;
                }
 
-               spin_unlock_irqrestore(&dev->event_lock, irqflags);
+               spin_unlock_irq(&dev->event_lock);
        }
 }
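[Editor's note] The irqsave-to-irq conversions in the last two hunks are safe because intel_modeset_preclose() runs in process context with interrupts enabled, so there is no IRQ state worth saving. A kernel-context sketch of the two idioms (fragment, not standalone code; both APIs exist with these signatures):

    spinlock_t lock;
    unsigned long flags;

    spin_lock_irqsave(&lock, flags);    /* safe in any context; saves
                                         * and restores the IRQ state */
    spin_unlock_irqrestore(&lock, flags);

    spin_lock_irq(&lock);               /* only valid when IRQs are
                                         * known to be enabled */
    spin_unlock_irq(&lock);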
index 5ad45bfff3feba593460ffd3f47984b3ef6f5b0e..5cecc20efa71df1fc8171de1660fcd4d28b99e8b 100644 (file)
@@ -113,6 +113,9 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+                                     enum pipe pipe);
 
 int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
@@ -224,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static uint32_t
-pack_aux(uint8_t *src, int src_bytes)
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
 {
        int     i;
        uint32_t v = 0;
@@ -237,8 +239,7 @@ pack_aux(uint8_t *src, int src_bytes)
        return v;
 }
 
-static void
-unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 {
        int i;
        if (dst_bytes > 4)
@@ -283,12 +284,10 @@ intel_hrawclk(struct drm_device *dev)
 
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp,
-                                   struct edp_power_seq *out);
+                                   struct intel_dp *intel_dp);
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                             struct intel_dp *intel_dp,
-                                             struct edp_power_seq *out);
+                                             struct intel_dp *intel_dp);
 
 static void pps_lock(struct intel_dp *intel_dp)
 {
@@ -322,6 +321,66 @@ static void pps_unlock(struct intel_dp *intel_dp)
        intel_display_power_put(dev_priv, power_domain);
 }
 
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = intel_dp->pps_pipe;
+       bool pll_enabled;
+       uint32_t DP;
+
+       if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
+                "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+                pipe_name(pipe), port_name(intel_dig_port->port)))
+               return;
+
+       DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
+                     pipe_name(pipe), port_name(intel_dig_port->port));
+
+       /* Preserve the BIOS-computed detected bit. This is
+        * supposed to be read-only.
+        */
+       DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+       DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+       DP |= DP_PORT_WIDTH(1);
+       DP |= DP_LINK_TRAIN_PAT_1;
+
+       if (IS_CHERRYVIEW(dev))
+               DP |= DP_PIPE_SELECT_CHV(pipe);
+       else if (pipe == PIPE_B)
+               DP |= DP_PIPEB_SELECT;
+
+       pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+       /*
+        * The DPLL for the pipe must be enabled for this to work.
+        * So temporarily enable it if it's not already enabled.
+        */
+       if (!pll_enabled)
+               vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+                                &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+
+       /*
+        * Similar magic as in intel_dp_enable_port().
+        * We _must_ do this port enable + disable trick
+        * to make this power sequencer lock onto the port.
+        * Otherwise even the VDD force bit won't work.
+        */
+       I915_WRITE(intel_dp->output_reg, DP);
+       POSTING_READ(intel_dp->output_reg);
+
+       I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
+       POSTING_READ(intel_dp->output_reg);
+
+       I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+       POSTING_READ(intel_dp->output_reg);
+
+       if (!pll_enabled)
+               vlv_force_pll_off(dev, pipe);
+}
+
 static enum pipe
 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
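[Editor's note] vlv_power_sequencer_kick() above pulses the port enable bit so the freshly assigned power sequencer latches onto the port. A toy model of the posted-write pulse (register layout and names invented; a plain variable stands in for MMIO):

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_EN (1u << 31)           /* illustrative bit */

    static uint32_t output_reg;          /* stands in for the MMIO reg */

    static void write_posted(uint32_t val)
    {
        output_reg = val;                /* I915_WRITE + POSTING_READ */
        printf("reg <- 0x%08x\n", (unsigned)output_reg);
    }

    int main(void)
    {
        uint32_t dp = 0x00000041;        /* pre-computed config bits */

        write_posted(dp);                /* fully configured, port off */
        write_posted(dp | PORT_EN);      /* pulse: port on */
        write_posted(dp & ~PORT_EN);     /* and off again */
        return 0;
    }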
@@ -330,10 +389,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-       struct edp_power_seq power_seq;
+       enum pipe pipe;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       /* We should never land here with regular DP ports */
+       WARN_ON(!is_edp(intel_dp));
+
        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;
 
@@ -359,18 +421,26 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
-               return PIPE_A;
+               pipe = PIPE_A;
+       else
+               pipe = ffs(pipes) - 1;
 
-       intel_dp->pps_pipe = ffs(pipes) - 1;
+       vlv_steal_power_sequencer(dev, pipe);
+       intel_dp->pps_pipe = pipe;
 
        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));
 
        /* init power sequencer on this pipe and port */
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       intel_dp_init_panel_power_sequencer(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+       /*
+        * Even vdd force doesn't work until we've made
+        * the power sequencer lock in on the port.
+        */
+       vlv_power_sequencer_kick(intel_dp);
 
        return intel_dp->pps_pipe;
 }
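[Editor's note] The pipe pick above takes the lowest still-unclaimed power sequencer from a bitmask with ffs(), falling back to pipe A when the mask is unexpectedly empty. The same selection as a standalone function:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    /* pipes: bitmask of power sequencers not yet claimed by
     * another eDP port; pipe 0 stands in for PIPE_A. */
    static int pick_pipe(unsigned int pipes)
    {
        return pipes ? ffs(pipes) - 1 : 0;
    }

    int main(void)
    {
        /* prints 0 1 0 */
        printf("%d %d %d\n", pick_pipe(0x3), pick_pipe(0x2), pick_pipe(0x0));
        return 0;
    }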
@@ -425,7 +495,6 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_power_seq power_seq;
        enum port port = intel_dig_port->port;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -453,9 +522,8 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));
 
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       intel_dp_init_panel_power_sequencer(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -550,6 +618,10 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       if (IS_VALLEYVIEW(dev) &&
+           intel_dp->pps_pipe == INVALID_PIPE)
+               return false;
+
        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
@@ -560,6 +632,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       if (IS_VALLEYVIEW(dev) &&
+           intel_dp->pps_pipe == INVALID_PIPE)
+               return false;
+
        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 }
 
@@ -661,6 +737,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        return index ? 0 : 100;
 }
 
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+       /*
+        * SKL doesn't need us to program the AUX clock divider (the hardware
+        * derives the clock from CDCLK automatically). We still implement the
+        * get_aux_clock_divider vfunc to plug into the existing code.
+        */
+       return index ? 0 : 1;
+}
+
 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
@@ -691,9 +777,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 }
 
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+                                     bool has_aux_irq,
+                                     int send_bytes,
+                                     uint32_t unused)
+{
+       return DP_AUX_CH_CTL_SEND_BUSY |
+              DP_AUX_CH_CTL_DONE |
+              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_TIME_OUT_ERROR |
+              DP_AUX_CH_CTL_TIME_OUT_1600us |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
 static int
 intel_dp_aux_ch(struct intel_dp *intel_dp,
-               uint8_t *send, int send_bytes,
+               const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
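[Editor's note] On SKL the AUX clock comes from CDCLK, so the divider vfunc returns a dummy 1 for index 0 purely to keep the shared retry loop iterating once; the real work is the send-ctl word, which packs flag bits and a byte count into one register value. A toy version of that packing (bit positions are illustrative; the real ones live in i915_reg.h):

    #include <stdint.h>
    #include <stdio.h>

    #define CTL_SEND_BUSY  (1u << 31)    /* illustrative bits */
    #define CTL_DONE       (1u << 30)
    #define CTL_MSG_SHIFT  20

    static uint32_t make_send_ctl(int send_bytes)
    {
        return CTL_SEND_BUSY | CTL_DONE |
               ((uint32_t)send_bytes << CTL_MSG_SHIFT);
    }

    int main(void)
    {
        printf("0x%08x\n", (unsigned)make_send_ctl(5));   /* 0xc0500000 */
        return 0;
    }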
@@ -760,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(ch_data + i,
-                                          pack_aux(send + i, send_bytes - i));
+                                          intel_dp_pack_aux(send + i,
+                                                            send_bytes - i));
 
                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);
@@ -814,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                recv_bytes = recv_size;
 
        for (i = 0; i < recv_bytes; i += 4)
-               unpack_aux(I915_READ(ch_data + i),
-                          recv + i, recv_bytes - i);
+               intel_dp_unpack_aux(I915_READ(ch_data + i),
+                                   recv + i, recv_bytes - i);
 
        ret = recv_bytes;
 out:
@@ -925,7 +1027,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
                BUG();
        }
 
-       if (!HAS_DDI(dev))
+       /*
+        * The AUX_CTL register is usually DP_CTL + 0x10.
+        *
+        * On Haswell and Broadwell though:
+        *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+        *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+        *
+        * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+        */
+       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
        intel_dp->aux.name = name;
@@ -962,6 +1073,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
        intel_connector_unregister(intel_connector);
 }
 
+static void
+skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+{
+       u32 ctrl1;
+
+       pipe_config->ddi_pll_sel = SKL_DPLL0;
+       pipe_config->dpll_hw_state.cfgcr1 = 0;
+       pipe_config->dpll_hw_state.cfgcr2 = 0;
+
+       ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+       switch (link_bw) {
+       case DP_LINK_BW_1_62:
+               ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+                                             SKL_DPLL0);
+               break;
+       case DP_LINK_BW_2_7:
+               ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+                                             SKL_DPLL0);
+               break;
+       case DP_LINK_BW_5_4:
+               ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+                                             SKL_DPLL0);
+               break;
+       }
+       pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+}
+
 static void
 hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
 {
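[Editor's note] skl_edp_set_pll_config() above maps the DPCD link-bw code onto a DPLL0 link-rate select. For reference, the codes come from the DP spec (0x06 = 1.62 Gbit/s, 0x0a = 2.7, 0x14 = 5.4 per lane); a standalone lookup with those values:

    #include <stdint.h>
    #include <stdio.h>

    /* DPCD link-bw code -> per-lane link rate in kHz. */
    static int link_bw_to_khz(uint8_t link_bw)
    {
        switch (link_bw) {
        case 0x06: return 1620000;   /* DP_LINK_BW_1_62 */
        case 0x0a: return 2700000;   /* DP_LINK_BW_2_7 */
        case 0x14: return 5400000;   /* DP_LINK_BW_5_4 */
        default:   return 0;         /* unknown/unsupported */
        }
    }

    int main(void)
    {
        printf("%d kHz\n", link_bw_to_khz(0x0a));
        return 0;
    }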
@@ -1139,7 +1277,9 @@ found:
                                &pipe_config->dp_m2_n2);
        }
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+               skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
        else
                intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1212,12 +1352,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
-       if (crtc->config.has_audio) {
-               DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-                                pipe_name(crtc->pipe));
+       if (crtc->config.has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
-               intel_write_eld(&encoder->base, adjusted_mode);
-       }
 
        /* Split out the IBX/CPU vs CPT settings */
 
@@ -1367,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (!is_edp(intel_dp))
                return false;
 
+       cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;
 
        if (edp_have_panel_vdd(intel_dp))
@@ -1375,7 +1512,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
 
-       DRM_DEBUG_KMS("Turning eDP VDD on\n");
+       DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
+                     port_name(intel_dig_port->port));
 
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);
@@ -1394,7 +1532,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
-               DRM_DEBUG_KMS("eDP was not running\n");
+               DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
+                             port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }
 
@@ -1419,7 +1558,8 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);
 
-       WARN(!vdd, "eDP VDD already requested on\n");
+       WARN(!vdd, "eDP port %c VDD already requested on\n",
+            port_name(dp_to_dig_port(intel_dp)->port));
 }
 
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -1440,7 +1580,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if (!edp_have_panel_vdd(intel_dp))
                return;
 
-       DRM_DEBUG_KMS("Turning eDP VDD off\n");
+       DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
+                     port_name(intel_dig_port->port));
 
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;
@@ -1501,7 +1642,8 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
        if (!is_edp(intel_dp))
                return;
 
-       WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+       WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+            port_name(dp_to_dig_port(intel_dp)->port));
 
        intel_dp->want_panel_vdd = false;
 
@@ -1511,40 +1653,25 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
                edp_panel_vdd_schedule_off(intel_dp);
 }
 
-/*
- * Must be paired with intel_edp_panel_vdd_on().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
-       if (!is_edp(intel_dp))
-               return;
-
-       pps_lock(intel_dp);
-       edp_panel_vdd_off(intel_dp, sync);
-       pps_unlock(intel_dp);
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
+static void edp_panel_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;
 
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
        if (!is_edp(intel_dp))
                return;
 
-       DRM_DEBUG_KMS("Turn eDP power on\n");
+       DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
+                     port_name(dp_to_dig_port(intel_dp)->port));
 
-       pps_lock(intel_dp);
-
-       if (edp_have_panel_power(intel_dp)) {
-               DRM_DEBUG_KMS("eDP power already on\n");
-               goto out;
-       }
+       if (WARN(edp_have_panel_power(intel_dp),
+                "eDP port %c panel power already on\n",
+                port_name(dp_to_dig_port(intel_dp)->port)))
+               return;
 
        wait_panel_power_cycle(intel_dp);
 
@@ -1572,12 +1699,20 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+       if (!is_edp(intel_dp))
+               return;
 
- out:
+       pps_lock(intel_dp);
+       edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
 }
 
-void intel_edp_panel_off(struct intel_dp *intel_dp)
+
+static void edp_panel_off(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
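[Editor's note] The refactor above (and the matching one for panel off just below) follows a common locked/unlocked function pairing: the bare edp_panel_on() asserts pps_mutex is held, while the public intel_edp_panel_on() wrapper takes and drops it. A standalone pthread model of the pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold pps_mutex (cf. lockdep_assert_held()). */
    static void panel_on_locked(void)
    {
        puts("panel power on");
    }

    /* Public wrapper: takes the lock around the locked helper. */
    static void panel_on(void)
    {
        pthread_mutex_lock(&pps_mutex);
        panel_on_locked();
        pthread_mutex_unlock(&pps_mutex);
    }

    int main(void)
    {
        panel_on();
        return 0;
    }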
@@ -1587,14 +1722,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
        u32 pp;
        u32 pp_ctrl_reg;
 
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
        if (!is_edp(intel_dp))
                return;
 
-       DRM_DEBUG_KMS("Turn eDP power off\n");
-
-       pps_lock(intel_dp);
+       DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
+                     port_name(dp_to_dig_port(intel_dp)->port));
 
-       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+       WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
+            port_name(dp_to_dig_port(intel_dp)->port));
 
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1615,7 +1752,15 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+       if (!is_edp(intel_dp))
+               return;
 
+       pps_lock(intel_dp);
+       edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
 }
 
@@ -1819,7 +1964,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(intel_dp->output_reg);
@@ -1951,368 +2096,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
-{
-       return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
-}
-
-static bool intel_edp_is_psr_enabled(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!HAS_PSR(dev))
-               return false;
-
-       return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-}
-
-static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
-                                   struct edp_vsc_psr *vsc_psr)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
-       u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
-       uint32_t *data = (uint32_t *) vsc_psr;
-       unsigned int i;
-
-       /* As per BSPec (Pipe Video Data Island Packet), we need to disable
-          the video DIP being updated before program video DIP data buffer
-          registers for DIP being updated. */
-       I915_WRITE(ctl_reg, 0);
-       POSTING_READ(ctl_reg);
-
-       for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
-               if (i < sizeof(struct edp_vsc_psr))
-                       I915_WRITE(data_reg + i, *data++);
-               else
-                       I915_WRITE(data_reg + i, 0);
-       }
-
-       I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
-       POSTING_READ(ctl_reg);
-}
-
-static void intel_edp_psr_setup(struct intel_dp *intel_dp)
-{
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_vsc_psr psr_vsc;
-
-       /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
-       memset(&psr_vsc, 0, sizeof(psr_vsc));
-       psr_vsc.sdp_header.HB0 = 0;
-       psr_vsc.sdp_header.HB1 = 0x7;
-       psr_vsc.sdp_header.HB2 = 0x2;
-       psr_vsc.sdp_header.HB3 = 0x8;
-       intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
-       /* Avoid continuous PSR exit by masking memup and hpd */
-       I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-}
-
-static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t aux_clock_divider;
-       int precharge = 0x3;
-       int msg_size = 5;       /* Header(4) + Message(1) */
-       bool only_standby = false;
-
-       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
-       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
-               only_standby = true;
-
-       /* Enable PSR in sink */
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                                  DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-       else
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                                  DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-
-       /* Setup AUX registers */
-       I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
-       I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
-       I915_WRITE(EDP_PSR_AUX_CTL(dev),
-                  DP_AUX_CH_CTL_TIME_OUT_400us |
-                  (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-}
-
-static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t max_sleep_time = 0x1f;
-       uint32_t idle_frames = 1;
-       uint32_t val = 0x0;
-       const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-       bool only_standby = false;
-
-       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
-               only_standby = true;
-
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
-               val |= EDP_PSR_LINK_STANDBY;
-               val |= EDP_PSR_TP2_TP3_TIME_0us;
-               val |= EDP_PSR_TP1_TIME_0us;
-               val |= EDP_PSR_SKIP_AUX_EXIT;
-               val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
-       } else
-               val |= EDP_PSR_LINK_DISABLE;
-
-       I915_WRITE(EDP_PSR_CTL(dev), val |
-                  (IS_BROADWELL(dev) ? 0 : link_entry_time) |
-                  max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
-                  idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
-                  EDP_PSR_ENABLE);
-}
-
-static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       lockdep_assert_held(&dev_priv->psr.lock);
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
-       dev_priv->psr.source_ok = false;
-
-       if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
-               DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-               return false;
-       }
-
-       if (!i915.enable_psr) {
-               DRM_DEBUG_KMS("PSR disable by flag\n");
-               return false;
-       }
-
-       /* Below limitations aren't valid for Broadwell */
-       if (IS_BROADWELL(dev))
-               goto out;
-
-       if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
-           S3D_ENABLE) {
-               DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-               return false;
-       }
-
-       if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
-               DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-               return false;
-       }
-
- out:
-       dev_priv->psr.source_ok = true;
-       return true;
-}
-
-static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
-       WARN_ON(dev_priv->psr.active);
-       lockdep_assert_held(&dev_priv->psr.lock);
-
-       /* Enable PSR on the panel */
-       intel_edp_psr_enable_sink(intel_dp);
-
-       /* Enable PSR on the host */
-       intel_edp_psr_enable_source(intel_dp);
-
-       dev_priv->psr.active = true;
-}
-
-void intel_edp_psr_enable(struct intel_dp *intel_dp)
-{
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!HAS_PSR(dev)) {
-               DRM_DEBUG_KMS("PSR not supported on this platform\n");
-               return;
-       }
-
-       if (!is_edp_psr(intel_dp)) {
-               DRM_DEBUG_KMS("PSR not supported by this panel\n");
-               return;
-       }
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (dev_priv->psr.enabled) {
-               DRM_DEBUG_KMS("PSR already in use\n");
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       dev_priv->psr.busy_frontbuffer_bits = 0;
-
-       /* Setup PSR once */
-       intel_edp_psr_setup(intel_dp);
-
-       if (intel_edp_psr_match_conditions(intel_dp))
-               dev_priv->psr.enabled = intel_dp;
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_disable(struct intel_dp *intel_dp)
-{
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       if (dev_priv->psr.active) {
-               I915_WRITE(EDP_PSR_CTL(dev),
-                          I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
-               /* Wait till PSR is idle */
-               if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
-                              EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
-                       DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
-               dev_priv->psr.active = false;
-       } else {
-               WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
-       }
-
-       dev_priv->psr.enabled = NULL;
-       mutex_unlock(&dev_priv->psr.lock);
-
-       cancel_delayed_work_sync(&dev_priv->psr.work);
-}
-
-static void intel_edp_psr_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.work.work);
-       struct intel_dp *intel_dp = dev_priv->psr.enabled;
-
-       mutex_lock(&dev_priv->psr.lock);
-       intel_dp = dev_priv->psr.enabled;
-
-       if (!intel_dp)
-               goto unlock;
-
-       /*
-        * The delayed work can race with an invalidate hence we need to
-        * recheck. Since psr_flush first clears this and then reschedules we
-        * won't ever miss a flush when bailing out here.
-        */
-       if (dev_priv->psr.busy_frontbuffer_bits)
-               goto unlock;
-
-       intel_edp_psr_do_enable(intel_dp);
-unlock:
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_edp_psr_do_exit(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->psr.active) {
-               u32 val = I915_READ(EDP_PSR_CTL(dev));
-
-               WARN_ON(!(val & EDP_PSR_ENABLE));
-
-               I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
-               dev_priv->psr.active = false;
-       }
-
-}
-
-void intel_edp_psr_invalidate(struct drm_device *dev,
-                             unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
-       enum pipe pipe;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-       pipe = to_intel_crtc(crtc)->pipe;
-
-       intel_edp_psr_do_exit(dev);
-
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
-       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_flush(struct drm_device *dev,
-                        unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
-       enum pipe pipe;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-       pipe = to_intel_crtc(crtc)->pipe;
-       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
-       /*
-        * On Haswell sprite plane updates don't result in a psr invalidating
-        * signal in the hardware. Which means we need to manually fake this in
-        * software for all flushes, not just when we've seen a preceding
-        * invalidation through frontbuffer rendering.
-        */
-       if (IS_HASWELL(dev) &&
-           (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-               intel_edp_psr_do_exit(dev);
-
-       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               schedule_delayed_work(&dev_priv->psr.work,
-                                     msecs_to_jiffies(100));
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
-       mutex_init(&dev_priv->psr.lock);
-}
-
 static void intel_disable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+       if (crtc->config.has_audio)
+               intel_audio_codec_disable(encoder);
 
        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
@@ -2467,14 +2258,23 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       intel_dp->DP |= DP_PORT_EN;
-
        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);
 
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
+
+       /*
+        * Magic for VLV/CHV. We _must_ first set up the register
+        * without actually enabling the port, and then do another
+        * write to enable the port. Otherwise link training will
+        * fail when the power sequencer is freshly used for this port.
+        */
+       intel_dp->DP |= DP_PORT_EN;
+
+       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+       POSTING_READ(intel_dp->output_reg);
 }
 
 static void intel_enable_dp(struct intel_encoder *encoder)
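[Editor's note] Same two-write idiom as in vlv_power_sequencer_kick() earlier: the first posted write latches the full configuration with the port still disabled, the second flips only DP_PORT_EN. A kernel-style fragment of the ordering (not standalone; I915_WRITE/POSTING_READ as in the driver):

    I915_WRITE(output_reg, DP);                /* config, port off */
    POSTING_READ(output_reg);
    I915_WRITE(output_reg, DP | DP_PORT_EN);   /* now enable */
    POSTING_READ(output_reg);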
@@ -2482,19 +2282,38 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
 
+       pps_lock(intel_dp);
+
+       if (IS_VALLEYVIEW(dev))
+               vlv_init_panel_power_sequencer(intel_dp);
+
        intel_dp_enable_port(intel_dp);
-       intel_edp_panel_vdd_on(intel_dp);
-       intel_edp_panel_on(intel_dp);
-       intel_edp_panel_vdd_off(intel_dp, true);
+
+       edp_panel_vdd_on(intel_dp);
+       edp_panel_on(intel_dp);
+       edp_panel_vdd_off(intel_dp, true);
+
+       pps_unlock(intel_dp);
+
+       if (IS_VALLEYVIEW(dev))
+               vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
+
+       if (crtc->config.has_audio) {
+               DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+                                pipe_name(crtc->pipe));
+               intel_audio_codec_enable(encoder);
+       }
 }
 
 static void g4x_enable_dp(struct intel_encoder *encoder)
@@ -2526,6 +2345,32 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
        }
 }
 
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+       enum pipe pipe = intel_dp->pps_pipe;
+       int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+
+       edp_panel_vdd_off_sync(intel_dp);
+
+       /*
+        * VLV seems to get confused when multiple power sequencers
+        * have the same port selected (even if only one has power/vdd
+        * enabled). The failure manifests as vlv_wait_port_ready() failing.
+        * CHV, on the other hand, doesn't seem to mind having the same port
+        * selected in multiple power sequencers, but let's always clear the
+        * port select when logically disconnecting a power sequencer
+        * from a port.
+        */
+       DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
+                     pipe_name(pipe), port_name(intel_dig_port->port));
+       I915_WRITE(pp_on_reg, 0);
+       POSTING_READ(pp_on_reg);
+
+       intel_dp->pps_pipe = INVALID_PIPE;
+}
+
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
 {
@@ -2534,6 +2379,9 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return;
+
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
@@ -2551,10 +2399,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));
 
-               /* make sure vdd is off before we steal it */
-               edp_panel_vdd_off_sync(intel_dp);
+               WARN(encoder->connectors_active,
+                    "stealing pipe %c power sequencer from active eDP port %c\n",
+                    pipe_name(pipe), port_name(port));
 
-               intel_dp->pps_pipe = INVALID_PIPE;
+               /* make sure vdd is off before we steal it */
+               vlv_detach_power_sequencer(intel_dp);
        }
 }
 
@@ -2565,10 +2415,12 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       struct edp_power_seq power_seq;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       if (!is_edp(intel_dp))
+               return;
+
        if (intel_dp->pps_pipe == crtc->pipe)
                return;
 
@@ -2578,7 +2430,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
-               edp_panel_vdd_off_sync(intel_dp);
+               vlv_detach_power_sequencer(intel_dp);
 
        /*
         * We may be stealing the power
@@ -2593,9 +2445,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
 
        /* init power sequencer on this pipe and port */
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       intel_dp_init_panel_power_sequencer(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2624,15 +2475,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 
        mutex_unlock(&dev_priv->dpio_lock);
 
-       if (is_edp(intel_dp)) {
-               pps_lock(intel_dp);
-               vlv_init_panel_power_sequencer(intel_dp);
-               pps_unlock(intel_dp);
-       }
-
        intel_enable_dp(encoder);
-
-       vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2680,6 +2523,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
 
        mutex_lock(&dev_priv->dpio_lock);
 
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2715,15 +2567,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
 
        mutex_unlock(&dev_priv->dpio_lock);
 
-       if (is_edp(intel_dp)) {
-               pps_lock(intel_dp);
-               vlv_init_panel_power_sequencer(intel_dp);
-               pps_unlock(intel_dp);
-       }
-
        intel_enable_dp(encoder);
-
-       vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2843,7 +2687,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+       else if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2859,7 +2705,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               default:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
+               }
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
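[Editor's note] The new gen9 table keeps the usual DP property that the maximum pre-emphasis level falls as the requested voltage-swing level rises. The mapping above, reduced to arithmetic in a standalone check:

    #include <stdio.h>

    /* Swing and emphasis levels run 0..3; the max emphasis shrinks
     * as swing grows, clamped at 0 (matches the switch above). */
    static int max_pre_emphasis(int voltage_swing)
    {
        int level = 3 - voltage_swing;

        return level > 0 ? level : 0;
    }

    int main(void)
    {
        int vs;

        for (vs = 0; vs <= 3; vs++)
            printf("swing %d -> max emphasis %d\n",
                   vs, max_pre_emphasis(vs));
        return 0;
    }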
@@ -3095,12 +2952,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
        /* Program swing deemph */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3341,7 +3212,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
@@ -3605,7 +3476,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 
                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
-                       intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
@@ -3763,8 +3633,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
-       intel_edp_panel_vdd_on(intel_dp);
-
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
@@ -3772,8 +3640,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
-
-       intel_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -3787,7 +3653,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
                return false;
 
-       intel_edp_panel_vdd_on(intel_dp);
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
                if (buf[0] & DP_MST_CAP) {
                        DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3797,7 +3662,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
                        intel_dp->is_mst = false;
                }
        }
-       intel_edp_panel_vdd_off(intel_dp, false);
 
        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        return intel_dp->is_mst;
@@ -3809,26 +3673,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
-       u8 buf[1];
+       u8 buf;
+       int test_crc_count;
+       int attempts = 6;
 
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
 
-       if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+       if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;
 
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+               return -EIO;
+
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              DP_TEST_SINK_START) < 0)
+                               buf | DP_TEST_SINK_START) < 0)
+               return -EIO;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
+       test_crc_count = buf & DP_TEST_COUNT_MASK;
 
-       /* Wait 2 vblanks to be sure we will have the correct CRC value */
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       do {
+               if (drm_dp_dpcd_readb(&intel_dp->aux,
+                                     DP_TEST_SINK_MISC, &buf) < 0)
+                       return -EIO;
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+       } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+
+       if (attempts == 0) {
+               DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
+               return -ETIMEDOUT;
+       }
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
                return -EIO;
 
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+               return -EIO;
+       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+                              buf & ~DP_TEST_SINK_START) < 0)
+               return -EIO;
+
        return 0;
 }
 
@@ -4450,14 +4336,58 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
+       cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
 }
 
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
+
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
+       if (!edp_have_panel_vdd(intel_dp))
+               return;
+
+       /*
+        * The VDD bit needs a power domain reference, so if the bit is
+        * already enabled when we boot or resume, grab this reference and
+        * schedule a vdd off, so we don't hold on to the reference
+        * indefinitely.
+        */
+       DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+       power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+       intel_display_power_get(dev_priv, power_domain);
+
+       edp_panel_vdd_schedule_off(intel_dp);
+}
+
 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
-       intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+       struct intel_dp *intel_dp;
+
+       if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+               return;
+
+       intel_dp = enc_to_intel_dp(encoder);
+
+       pps_lock(intel_dp);
+
+       /*
+        * Read out the current power sequencer assignment,
+        * in case the BIOS did something with it.
+        */
+       if (IS_VALLEYVIEW(encoder->dev))
+               vlv_initial_power_sequencer_setup(intel_dp);
+
+       intel_edp_panel_vdd_sanitize(intel_dp);
+
+       pps_unlock(intel_dp);
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4644,16 +4574,20 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp,
-                                   struct edp_power_seq *out)
+                                   struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_power_seq cur, vbt, spec, final;
+       struct edp_power_seq cur, vbt, spec,
+               *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       /* already initialized? */
+       if (final->t11_t12 != 0)
+               return;
+
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4715,7 +4649,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
-#define assign_final(field)    final.field = (max(cur.field, vbt.field) == 0 ? \
+#define assign_final(field)    final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
@@ -4725,7 +4659,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        assign_final(t11_t12);
 #undef assign_final
 
-#define get_delay(field)       (DIV_ROUND_UP(final.field, 10))
+#define get_delay(field)       (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
@@ -4739,21 +4673,18 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
-       if (out)
-               *out = final;
 }
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                             struct intel_dp *intel_dp,
-                                             struct edp_power_seq *seq)
+                                             struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
+       const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -4836,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
         * hard to tell without seeing the users of this code.
         * Check locking and ordering once that lands.
         */
-       if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+       if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
                DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
                return;
        }
@@ -4939,40 +4870,8 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
        return downclock_mode;
 }
 
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
-{
-       struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_dp *intel_dp;
-       enum intel_display_power_domain power_domain;
-
-       if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               return;
-
-       intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
-       pps_lock(intel_dp);
-
-       if (!edp_have_panel_vdd(intel_dp))
-               goto out;
-       /*
-        * The VDD bit needs a power domain reference, so if the bit is
-        * already enabled when we boot or resume, grab this reference and
-        * schedule a vdd off, so we don't hold on to the reference
-        * indefinitely.
-        */
-       DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
-
-       edp_panel_vdd_schedule_off(intel_dp);
- out:
-       pps_unlock(intel_dp);
-}
-
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
-                                    struct intel_connector *intel_connector,
-                                    struct edp_power_seq *power_seq)
+                                    struct intel_connector *intel_connector)
 {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -4984,18 +4883,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
+       enum pipe pipe = INVALID_PIPE;
 
        intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
 
        if (!is_edp(intel_dp))
                return true;
 
-       intel_edp_panel_vdd_sanitize(intel_encoder);
+       pps_lock(intel_dp);
+       intel_edp_panel_vdd_sanitize(intel_dp);
+       pps_unlock(intel_dp);
 
        /* Cache DPCD and EDID for edp. */
-       intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
-       intel_edp_panel_vdd_off(intel_dp, false);
 
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -5010,7 +4910,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 
        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);
 
        mutex_lock(&dev->mode_config.mutex);
@@ -5052,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (IS_VALLEYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);
+
+               /*
+                * Figure out the current pipe for the initial backlight setup.
+                * If the current pipe isn't valid, try the PPS pipe, and if that
+                * fails just assume pipe A.
+                */
+               if (IS_CHERRYVIEW(dev))
+                       pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+               else
+                       pipe = PORT_TO_PIPE(intel_dp->DP);
+
+               if (pipe != PIPE_A && pipe != PIPE_B)
+                       pipe = intel_dp->pps_pipe;
+
+               if (pipe != PIPE_A && pipe != PIPE_B)
+                       pipe = PIPE_A;
+
+               DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
+                             pipe_name(pipe));
        }
 
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
-       intel_panel_setup_backlight(connector);
+       intel_panel_setup_backlight(connector, pipe);
 
        return true;
 }
@@ -5071,13 +4990,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
-       struct edp_power_seq power_seq = { 0 };
        int type;
 
        intel_dp->pps_pipe = INVALID_PIPE;
 
        /* intel_dp vfuncs */
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+       else if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5086,7 +5006,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 
-       intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+       else
+               intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5105,6 +5028,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;
 
+       /* eDP only on port B and/or C on vlv/chv */
+       if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
+                   port != PORT_B && port != PORT_C))
+               return false;
+
        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                        type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                        port_name(port));
@@ -5147,13 +5075,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
        if (is_edp(intel_dp)) {
                pps_lock(intel_dp);
-               if (IS_VALLEYVIEW(dev)) {
+               intel_dp_init_panel_power_timestamps(intel_dp);
+               if (IS_VALLEYVIEW(dev))
                        vlv_initial_power_sequencer_setup(intel_dp);
-               } else {
-                       intel_dp_init_panel_power_timestamps(intel_dp);
-                       intel_dp_init_panel_power_sequencer(dev, intel_dp,
-                                                           &power_seq);
-               }
+               else
+                       intel_dp_init_panel_power_sequencer(dev, intel_dp);
                pps_unlock(intel_dp);
        }
 
@@ -5167,7 +5093,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                }
        }
 
-       if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+       if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                drm_dp_aux_unregister(&intel_dp->aux);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
index d9a7a7865f66af56cfd1e62f9b7127f2016d3af2..bfe359506377852154b43c0307fb7e61c300277c 100644 (file)
@@ -278,7 +278,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
 }
 
 static enum drm_connector_status
-intel_mst_port_dp_detect(struct drm_connector *connector)
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
@@ -286,14 +286,6 @@ intel_mst_port_dp_detect(struct drm_connector *connector)
        return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
 }
 
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
-       enum drm_connector_status status;
-       status = intel_mst_port_dp_detect(connector);
-       return status;
-}
-
 static int
 intel_dp_mst_set_property(struct drm_connector *connector,
                          struct drm_property *property,
@@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
 #endif
 }
 
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
 {
        struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
index ba715229a54016324411687113d730fec801c479..25fdbb16d4e0defa47f660d2f2a940582b83608f 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
 
 #define DIV_ROUND_CLOSEST_ULL(ll, d)   \
 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
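 /* The macro above rounds to the nearest integer (halves round up): it biases
  * the dividend by half the divisor before the truncating do_div(), so e.g.
  * DIV_ROUND_CLOSEST_ULL(7, 2) == 4 and DIV_ROUND_CLOSEST_ULL(5, 2) == 3. */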
 
 /* these are outputs from the chip - integrated only;
    external chips are via DVO or SDVO output */
-#define INTEL_OUTPUT_UNUSED 0
-#define INTEL_OUTPUT_ANALOG 1
-#define INTEL_OUTPUT_DVO 2
-#define INTEL_OUTPUT_SDVO 3
-#define INTEL_OUTPUT_LVDS 4
-#define INTEL_OUTPUT_TVOUT 5
-#define INTEL_OUTPUT_HDMI 6
-#define INTEL_OUTPUT_DISPLAYPORT 7
-#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_DSI 9
-#define INTEL_OUTPUT_UNKNOWN 10
-#define INTEL_OUTPUT_DP_MST 11
+enum intel_output_type {
+       INTEL_OUTPUT_UNUSED = 0,
+       INTEL_OUTPUT_ANALOG = 1,
+       INTEL_OUTPUT_DVO = 2,
+       INTEL_OUTPUT_SDVO = 3,
+       INTEL_OUTPUT_LVDS = 4,
+       INTEL_OUTPUT_TVOUT = 5,
+       INTEL_OUTPUT_HDMI = 6,
+       INTEL_OUTPUT_DISPLAYPORT = 7,
+       INTEL_OUTPUT_EDP = 8,
+       INTEL_OUTPUT_DSI = 9,
+       INTEL_OUTPUT_UNKNOWN = 10,
+       INTEL_OUTPUT_DP_MST = 11,
+};
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -135,7 +138,7 @@ struct intel_encoder {
         */
        struct intel_crtc *new_crtc;
 
-       int type;
+       enum intel_output_type type;
        unsigned int cloneable;
        bool connectors_active;
        void (*hot_plug)(struct intel_encoder *);
@@ -240,6 +243,17 @@ typedef struct dpll {
        int     p;
 } intel_clock_t;
 
+struct intel_plane_state {
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_rect src;
+       struct drm_rect dst;
+       struct drm_rect clip;
+       struct drm_rect orig_src;
+       struct drm_rect orig_dst;
+       bool visible;
+};
+
 struct intel_plane_config {
        bool tiled;
        int size;
@@ -278,6 +292,9 @@ struct intel_crtc_config {
         * between pch encoders and cpu encoders. */
        bool has_pch_encoder;
 
+       /* Are we sending infoframes on the attached port? */
+       bool has_infoframe;
+
        /* CPU Transcoder for the pipe. Currently this can only differ from the
         * pipe on Haswell (where we have a special eDP transcoder). */
        enum transcoder cpu_transcoder;
@@ -326,7 +343,10 @@ struct intel_crtc_config {
        /* Selected dpll when shared or DPLL_ID_PRIVATE. */
        enum intel_dpll_id shared_dpll;
 
-       /* PORT_CLK_SEL for DDI ports. */
+       /*
+        * - PORT_CLK_SEL for DDI ports on HSW/BDW.
+        * - enum skl_dpll on SKL
+        */
        uint32_t ddi_pll_sel;
 
        /* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -387,7 +407,14 @@ struct intel_pipe_wm {
 
 struct intel_mmio_flip {
        u32 seqno;
-       u32 ring_id;
+       struct intel_engine_cs *ring;
+       struct work_struct work;
+};
+
+struct skl_pipe_wm {
+       struct skl_wm_level wm[8];
+       struct skl_wm_level trans_wm;
+       uint32_t linetime;
 };
 
 struct intel_crtc {
@@ -437,6 +464,8 @@ struct intel_crtc {
        struct {
                /* watermarks currently being used  */
                struct intel_pipe_wm active;
+               /* SKL wm values currently in use */
+               struct skl_pipe_wm skl_active;
        } wm;
 
        int scanline_offset;
@@ -529,6 +558,7 @@ struct intel_hdmi {
        void (*set_infoframes)(struct drm_encoder *encoder,
                               bool enable,
                               struct drm_display_mode *adjusted_mode);
+       bool (*infoframe_enabled)(struct drm_encoder *encoder);
 };
 
 struct intel_dp_mst_encoder;
@@ -578,6 +608,7 @@ struct intel_dp {
         * this port. Only relevant on VLV/CHV.
         */
        enum pipe pps_pipe;
+       struct edp_power_seq pps_delays;
 
        bool use_tps3;
        bool can_mst; /* this port supports mst */
@@ -734,32 +765,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+/*
+ * Returns the number of planes for this pipe: the number of sprites plus
+ * one for the primary plane. The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+       return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
 
-/* i915_irq.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+/* intel_fifo_underrun.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
                                           enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
                                           enum transcoder pch_transcoder,
                                           bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum transcoder pch_transcoder);
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+
+/* i915_irq.c */
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_device *dev);
+void gen6_disable_rps_interrupts(struct drm_device *dev);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
 {
        /*
         * We only use drm_irq_uninstall() at unload and VT switch, so
         * this is the only thing we need to check.
         */
-       return !dev_priv->pm._irqs_disabled;
+       return dev_priv->pm.irqs_enabled;
 }
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void i9xx_check_fifo_underruns(struct drm_device *dev);
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
@@ -792,11 +838,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_config *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
-/* intel_display.c */
-const char *intel_output_name(int output);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
+/* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
                             struct intel_engine_cs *ring);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +848,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 void intel_frontbuffer_flush(struct drm_device *dev,
                             unsigned frontbuffer_bits);
 /**
- * intel_frontbuffer_flip - prepare frontbuffer flip
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
@@ -824,6 +866,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 }
 
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+
+
+/* intel_audio.c */
+void intel_init_audio(struct drm_device *dev);
+void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder);
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +898,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe);
-void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+       drm_wait_one_vblank(dev, pipe);
+}
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport);
@@ -854,8 +913,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old);
-int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                              struct drm_i915_gem_object *obj,
+int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+                              struct drm_framebuffer *fb,
                               struct intel_engine_cs *pipelined);
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 struct drm_framebuffer *
@@ -877,7 +936,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
 void intel_put_shared_dpll(struct intel_crtc *crtc);
 
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+                     const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+
 /* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+                          enum pipe pipe);
 void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -889,13 +954,12 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_write_eld(struct drm_encoder *encoder,
-                    struct drm_display_mode *mode);
 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
                                             unsigned int tiling_mode,
                                             unsigned int bpp,
                                             unsigned int pitch);
-void intel_display_handle_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -908,7 +972,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -936,25 +999,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
 void intel_edp_backlight_on(struct intel_dp *intel_dp);
 void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
 void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_edp_psr_enable(struct intel_dp *intel_dp);
-void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_invalidate(struct drm_device *dev,
-                             unsigned frontbuffer_bits);
-void intel_edp_psr_flush(struct drm_device *dev,
-                        unsigned frontbuffer_bits);
-void intel_edp_psr_init(struct drm_device *dev);
-
-int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
 void intel_dp_mst_suspend(struct drm_device *dev);
 void intel_dp_mst_resume(struct drm_device *dev);
 int intel_dp_max_link_bw(struct intel_dp *intel_dp);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1044,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
                              int fitting_mode);
 void intel_panel_set_backlight_acpi(struct intel_connector *connector,
                                    u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector);
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1054,6 +1110,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_device *dev,
                                struct drm_display_mode *fixed_mode,
                                struct drm_connector *connector);
+void intel_backlight_register(struct drm_device *dev);
+void intel_backlight_unregister(struct drm_device *dev);
+
+
+/* intel_psr.c */
+bool intel_psr_is_enabled(struct drm_device *dev);
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+                             unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+                        unsigned frontbuffer_bits);
+void intel_psr_init(struct drm_device *dev);
+
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
@@ -1072,17 +1163,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_remove(struct drm_i915_private *);
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain);
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
-                                         enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
 void intel_init_gt_powersave(struct drm_device *dev);
 void intel_cleanup_gt_powersave(struct drm_device *dev);
 void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,14 +1173,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
+void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+                         struct skl_ddb_allocation *ddb /* out */);
 
 
 /* intel_sdvo.c */
@@ -1120,7 +1196,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
-
+bool intel_pipe_update_start(struct intel_crtc *crtc,
+                            uint32_t *start_vbl_count);
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_device *dev);
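The header changes above introduce enum intel_output_type and the intel_num_planes() helper. As a minimal sketch of how they are used together (the function name sketch_dump_pipe_info is hypothetical, not part of this series):

static void sketch_dump_pipe_info(struct intel_crtc *crtc)
{
	/* intel_pipe_has_type() now takes the enum rather than a bare int. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		DRM_DEBUG_KMS("pipe %c drives eDP\n", pipe_name(crtc->pipe));

	/* Sprites plus the primary plane; the cursor plane is not counted. */
	DRM_DEBUG_KMS("pipe %c has %u planes\n", pipe_name(crtc->pipe),
		      intel_num_planes(crtc));
}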
index 5bd9e09ad3c5ddac4202a6f21e6fb7198572ffb2..0b184079de14882e86fc19ddc8f2fdb076e0fa8a 100644 (file)
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("\n");
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        /* XXX: this only works for one DSI output */
index 9b584f3fbb9912808709bf13ccb002b188eb1c0f..f2183b554cbc7708db5567f4cb65e32061de594d 100644 (file)
@@ -119,25 +119,25 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
                goto out;
        }
 
-       /* Flush everything out, we'll be doing GTT only from now on */
-       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-       if (ret) {
-               DRM_ERROR("failed to pin obj: %d\n", ret);
-               goto out_unref;
-       }
-
        fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
        if (IS_ERR(fb)) {
                ret = PTR_ERR(fb);
-               goto out_unpin;
+               goto out_unref;
+       }
+
+       /* Flush everything out, we'll be doing GTT only from now on */
+       ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+       if (ret) {
+               DRM_ERROR("failed to pin obj: %d\n", ret);
+               goto out_fb;
        }
 
        ifbdev->fb = to_intel_framebuffer(fb);
 
        return 0;
 
-out_unpin:
-       i915_gem_object_ggtt_unpin(obj);
+out_fb:
+       drm_framebuffer_remove(fb);
 out_unref:
        drm_gem_object_unreference(&obj->base);
 out:
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644 (file)
index 0000000..77af512
--- /dev/null
@@ -0,0 +1,381 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful for
+ * debugging display issues, especially watermark settings.
+ *
+ * If an underrun is detected it is logged into dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
+ * interrupt register). Also, on some other platforms underrun interrupts are
+ * shared, which means that if we detect an underrun we need to disable underrun
+ * reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc;
+       enum pipe pipe;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       for_each_pipe(dev_priv, pipe) {
+               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+               if (crtc->cpu_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe;
+       struct intel_crtc *crtc;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       for_each_pipe(dev_priv, pipe) {
+               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+               if (crtc->pch_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+/**
+ * i9xx_check_fifo_underruns - check for fifo underruns
+ * @dev_priv: i915 device instance
+ *
+ * This function checks for fifo underruns on GMCH platforms. This needs to be
+ * done manually on modeset to make sure that we catch all underruns since they
+ * do not generate an interrupt by themselves on these platforms.
+ */
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       for_each_intel_crtc(dev_priv->dev, crtc) {
+               u32 reg = PIPESTAT(crtc->pipe);
+               u32 pipestat;
+
+               if (crtc->cpu_fifo_underrun_disabled)
+                       continue;
+
+               pipestat = I915_READ(reg) & 0xffff0000;
+               if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+                       continue;
+
+               I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+               POSTING_READ(reg);
+
+               DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+       }
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                            enum pipe pipe,
+                                            bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg = PIPESTAT(pipe);
+       u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (enable) {
+               I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+               POSTING_READ(reg);
+       } else {
+               if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+       }
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+                                         DE_PIPEB_FIFO_UNDERRUN;
+
+       if (enable)
+               ironlake_enable_display_irq(dev_priv, bit);
+       else
+               ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                 enum pipe pipe,
+                                                 bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (enable) {
+               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+               if (!ivb_can_enable_err_int(dev))
+                       return;
+
+               ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       } else {
+               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+               if (old &&
+                   I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+                       DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+                                 pipe_name(pipe));
+               }
+       }
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                 enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (enable)
+               dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+       else
+               dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+       I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+       POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum transcoder pch_transcoder,
+                                           bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+                      SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+       if (enable)
+               ibx_enable_display_interrupt(dev_priv, bit);
+       else
+               ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum transcoder pch_transcoder,
+                                           bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (enable) {
+               I915_WRITE(SERR_INT,
+                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+               if (!cpt_can_enable_serr_int(dev))
+                       return;
+
+               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+       } else {
+               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+               if (old && I915_READ(SERR_INT) &
+                   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+                       DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+                                 transcoder_name(pch_transcoder));
+               }
+       }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                                   enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       bool old;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       old = !intel_crtc->cpu_fifo_underrun_disabled;
+       intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+       if (HAS_GMCH_DISPLAY(dev))
+               i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+       else if (IS_GEN5(dev) || IS_GEN6(dev))
+               ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+       else if (IS_GEN7(dev))
+               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
+               broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+       return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables them for all pipes due to shared interrupts. Actual reporting is
+ * still per-pipe though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum pipe pipe, bool enable)
+{
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+                                                     enable);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       return ret;
+}
+
+static bool
+__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
+                                     enum pipe pipe)
+{
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, because there is just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum transcoder pch_transcoder,
+                                          bool enable)
+{
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long flags;
+       bool old;
+
+       /*
+        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+        * has only one pch transcoder A that all pipes can use. To avoid racy
+        * pch transcoder -> pipe lookups from interrupt code simply store the
+        * underrun statistics in crtc A. Since we never expose this anywhere
+        * nor use it outside of the fifo underrun code here, using the "wrong"
+        * crtc on LPT won't cause issues.
+        */
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+       old = !intel_crtc->pch_fifo_underrun_disabled;
+       intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+       if (HAS_PCH_IBX(dev_priv->dev))
+               ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+                                               enable);
+       else
+               cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+                                               enable, old);
+
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe)
+{
+       /* GMCH can't disable fifo underruns, filter them. */
+       if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+           !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+               return;
+
+       if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+               DRM_ERROR("CPU pipe %c FIFO underrun\n",
+                         pipe_name(pipe));
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum transcoder pch_transcoder)
+{
+       if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+                                                 false))
+               DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+                         transcoder_name(pch_transcoder));
+}
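As the kernel-doc above notes, intel_set_cpu_fifo_underrun_reporting() is used by the modeset code to avoid false positives when a pipe is brought up or down. A minimal sketch of that bracketing pattern, assuming a hypothetical disable path (sketch_disable_pipe is illustrative only; the save/restore of the returned state is the documented usage):

static void sketch_disable_pipe(struct drm_i915_private *dev_priv,
				struct intel_crtc *crtc)
{
	bool old;

	/* Underruns are expected while the pipe shuts down: mute reporting. */
	old = intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	/* ... the actual plane/pipe disable sequence would run here ... */

	/* Restore the previous reporting state once the pipe is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, old);
}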
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644 (file)
index 0000000..79f6d72
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file
+ * are then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * At a high level there are two types of powersaving features. The first kind
+ * works like a special cache (FBC and PSR) and is interested in when it should
+ * stop caching and when to restart caching. This is done by placing callbacks
+ * into the invalidate and the flush functions: at invalidate time the caching
+ * must be stopped and at flush time it can be restarted. Such features may also
+ * need to know when the frontbuffer changes (e.g. when the hw doesn't initiate
+ * an invalidate and flush on its own), which can be achieved by placing
+ * callbacks into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead, an idle timer
+ * implemented as delayed work should be started from the flush and flip
+ * functions and cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features, using these
+ * functions is deprecated and should be avoided.
+ */
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void intel_increase_pllclock(struct drm_device *dev,
+                                   enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int dpll_reg = DPLL(pipe);
+       int dpll;
+
+       if (!HAS_GMCH_DISPLAY(dev))
+               return;
+
+       if (!dev_priv->lvds_downclock_avail)
+               return;
+
+       dpll = I915_READ(dpll_reg);
+       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+               DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+               assert_panel_unlocked(dev_priv, pipe);
+
+               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+               I915_WRITE(dpll_reg, dpll);
+               intel_wait_for_vblank(dev, pipe);
+
+               dpll = I915_READ(dpll_reg);
+               if (dpll & DISPLAY_RATE_SELECT_FPA1)
+                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+       }
+}
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used, for example, to keep the update rate at the nominal refresh rate with
+ * DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+                              unsigned frontbuffer_bits,
+                              struct intel_engine_cs *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe;
+
+       if (!i915.powersave)
+               return;
+
+       for_each_pipe(dev_priv, pipe) {
+               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+                       continue;
+
+               intel_increase_pllclock(dev, pipe);
+               if (ring && intel_fbc_enabled(dev))
+                       ring->fbc_dirty = true;
+       }
+}
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL, any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                            struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       if (ring) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               dev_priv->fb_tracking.busy_bits
+                       |= obj->frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits
+                       &= ~obj->frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+       intel_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+                            unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Delay flushing when rings are still busy. */
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+       intel_psr_flush(dev, frontbuffer_bits);
+
+       /*
+        * FIXME: Unconditional fbc flushing here is a rather gross hack and
+        * needs to be reworked into a proper frontbuffer tracking scheme like
+        * psr employs.
+        */
+       if (dev_priv->fbc.need_sw_cache_clean) {
+               dev_priv->fbc.need_sw_cache_clean = false;
+               bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
+       }
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                       bool retire)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned frontbuffer_bits;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       frontbuffer_bits = obj->frontbuffer_bits;
+
+       if (retire) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               /* Filter out new bits since rendering started. */
+               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given by
+ * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
+ * completion is signalled with intel_frontbuffer_flip_complete. If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+                                   unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+       /* Remove stale busy bits due to the old buffer. */
+       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+                                    unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       /* Mask any cancelled flips. */
+       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
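The two flip hooks above are meant to bracket an asynchronous flip. A
sketch of the intended sequence, with an illustrative function name:

        /* Hedged sketch of the async flip protocol. */
        static void example_async_flip(struct drm_device *dev,
                                       unsigned frontbuffer_bits)
        {
                /* Arm the delayed flush and clear stale busy bits. */
                intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

                /* ... hardware latches the flip, vblank fires ... */

                /* Run the delayed flush, unless an invalidate in the
                 * meantime cancelled it. */
                intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
        }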
index 29ec1535992d413fa5faf70c8192c02d6334b02c..3abc2000fce9106843693290304e8e477794c783 100644 (file)
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(VIDEO_DIP_CTL);
 }
 
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       u32 val = I915_READ(VIDEO_DIP_CTL);
+
+       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+               return val & VIDEO_DIP_ENABLE;
+
+       return false;
+}
+
 static void ibx_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
                                const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+       u32 val = I915_READ(reg);
+
+       return val & VIDEO_DIP_ENABLE;
+}
+
 static void cpt_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
                                const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+       u32 val = I915_READ(reg);
+
+       return val & VIDEO_DIP_ENABLE;
+}
+
 static void vlv_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
                                const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(reg);
 }
 
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+       u32 val = I915_READ(reg);
+
+       return val & VIDEO_DIP_ENABLE;
+}
+
 static void hsw_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
                                const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
        POSTING_READ(ctl_reg);
 }
 
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+       u32 val = I915_READ(ctl_reg);
+
+       return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+                     VIDEO_DIP_ENABLE_VS_HSW);
+}
+
 /*
  * The data we write to the DIP data buffer registers is 1 byte bigger than the
  * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
        if (crtc->config.has_hdmi_sink)
                hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
-       if (crtc->config.has_audio) {
-               WARN_ON(!crtc->config.has_hdmi_sink);
-               DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-                                pipe_name(crtc->pipe));
-               hdmi_val |= SDVO_AUDIO_ENABLE;
-               intel_write_eld(&encoder->base, adjusted_mode);
-       }
-
        if (HAS_PCH_CPT(dev))
                hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
        else if (IS_CHERRYVIEW(dev))
@@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -732,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
 
+       if (intel_hdmi->infoframe_enabled(&encoder->base))
+               pipe_config->has_infoframe = true;
+
        if (tmp & SDVO_AUDIO_ENABLE)
                pipe_config->has_audio = true;
 
@@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
        }
+
+       if (intel_crtc->config.has_audio) {
+               WARN_ON(!intel_crtc->config.has_hdmi_sink);
+               DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+                                pipe_name(intel_crtc->pipe));
+               intel_audio_codec_enable(encoder);
+       }
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
        u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
 
+       if (crtc->config.has_audio)
+               intel_audio_codec_disable(encoder);
+
        temp = I915_READ(intel_hdmi->hdmi_reg);
 
        /* HW workaround for IBX, we need to move the port to transcoder A
@@ -922,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 
        pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
 
+       if (pipe_config->has_hdmi_sink)
+               pipe_config->has_infoframe = true;
+
        if (intel_hdmi->color_range_auto) {
                /* See CEA-861-E - 5.1 Default Encoding Parameters */
                if (pipe_config->has_hdmi_sink &&
@@ -1394,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct intel_hdmi *intel_hdmi = &dport->hdmi;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
@@ -1405,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
        mutex_lock(&dev_priv->dpio_lock);
 
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
        /* Deassert soft data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
        /* FIXME: Program the support xxx V-dB */
        /* Use 800mV-0dB */
        for (i = 0; i < 4; i++) {
@@ -1499,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
        mutex_unlock(&dev_priv->dpio_lock);
 
+       intel_hdmi->set_infoframes(&encoder->base,
+                                  intel_crtc->config.has_hdmi_sink,
+                                  adjusted_mode);
+
        intel_enable_hdmi(encoder);
 
        vlv_wait_port_ready(dev_priv, dport);
@@ -1593,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        if (IS_VALLEYVIEW(dev)) {
                intel_hdmi->write_infoframe = vlv_write_infoframe;
                intel_hdmi->set_infoframes = vlv_set_infoframes;
+               intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
        } else if (IS_G4X(dev)) {
                intel_hdmi->write_infoframe = g4x_write_infoframe;
                intel_hdmi->set_infoframes = g4x_set_infoframes;
+               intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
        } else if (HAS_DDI(dev)) {
                intel_hdmi->write_infoframe = hsw_write_infoframe;
                intel_hdmi->set_infoframes = hsw_set_infoframes;
+               intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
        } else if (HAS_PCH_IBX(dev)) {
                intel_hdmi->write_infoframe = ibx_write_infoframe;
                intel_hdmi->set_infoframes = ibx_set_infoframes;
+               intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
        } else {
                intel_hdmi->write_infoframe = cpt_write_infoframe;
                intel_hdmi->set_infoframes = cpt_set_infoframes;
+               intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
        }
 
        if (HAS_DDI(dev))
index bafd38b5703eba00e2ca3c2ffb00fea7840b4638..e588376227eaf6ec1f8e7ac4d831964f3fac9596 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
 
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
 #define RING_EXECLIST_QFULL            (1 << 0x2)
 #define RING_EXECLIST1_VALID           (1 << 0x3)
 #define RING_EXECLIST0_VALID           (1 << 0x4)
@@ -204,6 +203,9 @@ enum {
 };
 #define GEN8_CTX_ID_SHIFT 32
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+               struct intel_context *ctx);
+
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
  * @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 {
        WARN_ON(i915.enable_ppgtt == -1);
 
+       if (INTEL_INFO(dev)->gen >= 9)
+               return 1;
+
        if (enable_execlists == 0)
                return 0;
 
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
                                 struct drm_i915_gem_object *ctx_obj0,
                                 struct drm_i915_gem_object *ctx_obj1)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        uint64_t temp = 0;
        uint32_t desc[4];
        unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
         * Instead, we do the runtime_pm_get/put when creating/destroying requests.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-       if (IS_CHERRYVIEW(dev_priv->dev)) {
+       if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
                if (dev_priv->uncore.fw_rendercount++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                              FORCEWAKE_RENDER);
                if (dev_priv->uncore.fw_mediacount++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                              FORCEWAKE_MEDIA);
+               if (INTEL_INFO(dev)->gen >= 9) {
+                       if (dev_priv->uncore.fw_blittercount++ == 0)
+                               dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_BLITTER);
+               }
        } else {
                if (dev_priv->uncore.forcewake_count++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
        /* Release Force Wakeup (see the big comment above). */
        spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-       if (IS_CHERRYVIEW(dev_priv->dev)) {
+       if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
                if (--dev_priv->uncore.fw_rendercount == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                              FORCEWAKE_RENDER);
                if (--dev_priv->uncore.fw_mediacount == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                              FORCEWAKE_MEDIA);
+               if (INTEL_INFO(dev)->gen >= 9) {
+                       if (--dev_priv->uncore.fw_blittercount == 0)
+                               dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_BLITTER);
+               }
        } else {
                if (--dev_priv->uncore.forcewake_count == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
        spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
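The ELSP write above is wrapped in a reference-counted forcewake dance:
the first getter wakes a power domain, the last putter releases it, and
Gen9 adds a blitter domain next to render and media. The pattern,
distilled into an illustrative helper pair using the fields from the
diff:

        /* Hedged sketch of per-domain forcewake refcounting. */
        static void example_fw_get(struct drm_i915_private *dev_priv)
        {
                if (dev_priv->uncore.fw_rendercount++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                              FORCEWAKE_RENDER);
        }

        static void example_fw_put(struct drm_i915_private *dev_priv)
        {
                if (--dev_priv->uncore.fw_rendercount == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                              FORCEWAKE_RENDER);
        }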
 
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+                                   struct drm_i915_gem_object *ring_obj,
+                                   u32 tail)
 {
        struct page *page;
        uint32_t *reg_state;
@@ -350,43 +368,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
        reg_state = kmap_atomic(page);
 
        reg_state[CTX_RING_TAIL+1] = tail;
+       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
 
        kunmap_atomic(reg_state);
 
        return 0;
 }
 
-static int execlists_submit_context(struct intel_engine_cs *ring,
-                                   struct intel_context *to0, u32 tail0,
-                                   struct intel_context *to1, u32 tail1)
+static void execlists_submit_contexts(struct intel_engine_cs *ring,
+                                     struct intel_context *to0, u32 tail0,
+                                     struct intel_context *to1, u32 tail1)
 {
-       struct drm_i915_gem_object *ctx_obj0;
+       struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
        struct drm_i915_gem_object *ctx_obj1 = NULL;
+       struct intel_ringbuffer *ringbuf1 = NULL;
 
-       ctx_obj0 = to0->engine[ring->id].state;
        BUG_ON(!ctx_obj0);
        WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+       WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-       execlists_ctx_write_tail(ctx_obj0, tail0);
+       execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
        if (to1) {
+               ringbuf1 = to1->engine[ring->id].ringbuf;
                ctx_obj1 = to1->engine[ring->id].state;
                BUG_ON(!ctx_obj1);
                WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+               WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
-               execlists_ctx_write_tail(ctx_obj1, tail1);
+               execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
        }
 
        execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
-
-       return 0;
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
        struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
        struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        assert_spin_locked(&ring->execlist_lock);
 
@@ -403,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
                        list_del(&req0->execlist_link);
-                       queue_work(dev_priv->wq, &req0->work);
+                       list_add_tail(&req0->execlist_link,
+                               &ring->execlist_retired_req_list);
                        req0 = cursor;
                } else {
                        req1 = cursor;
@@ -413,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
        WARN_ON(req1 && req1->elsp_submitted);
 
-       WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
-                                        req1 ? req1->ctx : NULL,
-                                        req1 ? req1->tail : 0));
+       execlists_submit_contexts(ring, req0->ctx, req0->tail,
+                                 req1 ? req1->ctx : NULL,
+                                 req1 ? req1->tail : 0);
 
        req0->elsp_submitted++;
        if (req1)
@@ -425,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
                                           u32 request_id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_ctx_submit_request *head_req;
 
        assert_spin_locked(&ring->execlist_lock);
@@ -443,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 
                        if (--head_req->elsp_submitted <= 0) {
                                list_del(&head_req->execlist_link);
-                               queue_work(dev_priv->wq, &head_req->work);
+                               list_add_tail(&head_req->execlist_link,
+                                       &ring->execlist_retired_req_list);
                                return true;
                        }
                }
@@ -512,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
                   ((u32)ring->next_context_status_buffer & 0x07) << 8);
 }
 
-static void execlists_free_request_task(struct work_struct *work)
-{
-       struct intel_ctx_submit_request *req =
-               container_of(work, struct intel_ctx_submit_request, work);
-       struct drm_device *dev = req->ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       intel_runtime_pm_put(dev_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_context_unreference(req->ctx);
-       mutex_unlock(&dev->struct_mutex);
-
-       kfree(req);
-}
-
 static int execlists_context_queue(struct intel_engine_cs *ring,
                                   struct intel_context *to,
                                   u32 tail)
@@ -542,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                return -ENOMEM;
        req->ctx = to;
        i915_gem_context_reference(req->ctx);
+
+       if (to != ring->default_context)
+               intel_lr_context_pin(ring, to);
+
        req->ring = ring;
        req->tail = tail;
-       INIT_WORK(&req->work, execlists_free_request_task);
 
        intel_runtime_pm_get(dev_priv);
 
@@ -563,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 
                if (to == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
-                            "More than 2 already-submitted reqs queued\n");
+                               "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
-                       queue_work(dev_priv->wq, &tail_req->work);
+                       list_add_tail(&tail_req->execlist_link,
+                               &ring->execlist_retired_req_list);
                }
        }
 
@@ -733,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
        return 0;
 }
 
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+       struct intel_ctx_submit_request *req, *tmp;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       unsigned long flags;
+       struct list_head retired_list;
+
+       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       if (list_empty(&ring->execlist_retired_req_list))
+               return;
+
+       INIT_LIST_HEAD(&retired_list);
+       spin_lock_irqsave(&ring->execlist_lock, flags);
+       list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+       list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+               struct intel_context *ctx = req->ctx;
+               struct drm_i915_gem_object *ctx_obj =
+                               ctx->engine[ring->id].state;
+
+               if (ctx_obj && (ctx != ring->default_context))
+                       intel_lr_context_unpin(ring, ctx);
+               intel_runtime_pm_put(dev_priv);
+               i915_gem_context_unreference(req->ctx);
+               list_del(&req->execlist_link);
+               kfree(req);
+       }
+}
+
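intel_execlists_retire_requests() above uses a standard drain idiom:
list_replace_init() splices every retired request onto a private head
while the execlist spinlock is held, and the sleeping cleanup (unpin,
unreference, kfree) then runs without the lock. A generic sketch of the
idiom with illustrative names:

        /* Hedged sketch: drain a spinlock-protected list cheaply. */
        struct example_req *req, *tmp;
        LIST_HEAD(private);

        spin_lock_irqsave(&shared_lock, flags);
        list_replace_init(&shared_list, &private);      /* O(1) splice */
        spin_unlock_irqrestore(&shared_lock, flags);

        list_for_each_entry_safe(req, tmp, &private, link) {
                list_del(&req->link);
                /* cleanup that may take mutexes can safely run here */
                kfree(req);
        }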
 void intel_logical_ring_stop(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -793,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
        execlists_context_queue(ring, ctx, ringbuf->tail);
 }
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+               struct intel_context *ctx)
+{
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       int ret = 0;
+
+       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       if (ctx->engine[ring->id].unpin_count++ == 0) {
+               ret = i915_gem_obj_ggtt_pin(ctx_obj,
+                               GEN8_LR_CONTEXT_ALIGN, 0);
+               if (ret)
+                       goto reset_unpin_count;
+
+               ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+               if (ret)
+                       goto unpin_ctx_obj;
+       }
+
+       return ret;
+
+unpin_ctx_obj:
+       i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+       ctx->engine[ring->id].unpin_count = 0;
+
+       return ret;
+}
+
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+               struct intel_context *ctx)
+{
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+       if (ctx_obj) {
+               WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+               if (--ctx->engine[ring->id].unpin_count == 0) {
+                       intel_unpin_ringbuffer_obj(ringbuf);
+                       i915_gem_object_ggtt_unpin(ctx_obj);
+               }
+       }
+}
+
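intel_lr_context_pin() above pairs a per-engine refcount with the
kernel's goto-unwind idiom: only the 0 -> 1 transition pins anything,
and a failure unwinds the steps already taken in reverse order. A
condensed sketch, with the two pin calls reduced to illustrative
stand-ins:

        /* Hedged sketch of refcounted pinning with goto unwind. */
        if (engine_state->unpin_count++ == 0) {
                ret = example_pin_state();      /* i915_gem_obj_ggtt_pin */
                if (ret)
                        goto reset_count;
                ret = example_map_ring();       /* intel_pin_and_map_ringbuffer_obj */
                if (ret)
                        goto unpin_state;
        }
        return 0;

unpin_state:
        example_unpin_state();                  /* i915_gem_object_ggtt_unpin */
reset_count:
        engine_state->unpin_count = 0;
        return ret;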
 static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
                                    struct intel_context *ctx)
 {
+       int ret;
+
        if (ring->outstanding_lazy_seqno)
                return 0;
 
@@ -806,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
                if (request == NULL)
                        return -ENOMEM;
 
+               if (ctx != ring->default_context) {
+                       ret = intel_lr_context_pin(ring, ctx);
+                       if (ret) {
+                               kfree(request);
+                               return ret;
+                       }
+               }
+
                /* Hold a reference to the context this request belongs to
                 * (we will need it when the time comes to emit/retire the
                 * request).
@@ -991,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
        return 0;
 }
 
+static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
+                                              struct intel_context *ctx)
+{
+       int ret, i;
+       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_workarounds *w = &dev_priv->workarounds;
+
+       if (WARN_ON(w->count == 0))
+               return 0;
+
+       ring->gpu_caches_dirty = true;
+       ret = logical_ring_flush_all_caches(ringbuf);
+       if (ret)
+               return ret;
+
+       ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+       if (ret)
+               return ret;
+
+       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+       for (i = 0; i < w->count; i++) {
+               intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+               intel_logical_ring_emit(ringbuf, w->reg[i].value);
+       }
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+       intel_logical_ring_advance(ringbuf);
+
+       ring->gpu_caches_dirty = true;
+       ret = logical_ring_flush_all_caches(ringbuf);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
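A note on the ring-space arithmetic in the workaround emitter above:
intel_logical_ring_begin() reserves w->count * 2 + 2 dwords, which
breaks down as follows:

        /* Dword budget for the workaround batch:
         *   1              MI_LOAD_REGISTER_IMM(w->count) header
         *   w->count * 2   one address dword + one value dword per register
         *   1              trailing MI_NOOP pad
         * total: w->count * 2 + 2, matching the reservation above. */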
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
@@ -1034,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 
        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       return ret;
+       return init_workarounds_ring(ring);
 }
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1063,7 +1194,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1214,11 +1345,13 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv;
 
        if (!intel_ring_initialized(ring))
                return;
 
+       dev_priv = ring->dev->dev_private;
+
        intel_logical_ring_stop(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
        ring->preallocated_lazy_request = NULL;
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
        init_waitqueue_head(&ring->irq_queue);
 
        INIT_LIST_HEAD(&ring->execlist_queue);
+       INIT_LIST_HEAD(&ring->execlist_retired_req_list);
        spin_lock_init(&ring->execlist_lock);
        ring->next_context_status_buffer = 0;
 
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
                ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
        ring->init = gen8_init_render_ring;
+       ring->init_context = intel_logical_ring_workarounds_emit;
        ring->cleanup = intel_fini_pipe_control;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *ring_obj = ringbuf->obj;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct page *page;
        uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
        reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
        reg_state[CTX_RING_TAIL+1] = 0;
        reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
-       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+       /* Ring buffer start address is not known until the buffer is pinned.
+        * It is written to the context image in execlists_update_context()
+        */
        reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
        reg_state[CTX_RING_BUFFER_CONTROL+1] =
                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 
                if (ctx_obj) {
+                       struct intel_ringbuffer *ringbuf =
+                                       ctx->engine[i].ringbuf;
+                       struct intel_engine_cs *ring = ringbuf->ring;
+
+                       if (ctx == ring->default_context) {
+                               intel_unpin_ringbuffer_obj(ringbuf);
+                               i915_gem_object_ggtt_unpin(ctx_obj);
+                       }
                        intel_destroy_ringbuffer_obj(ringbuf);
                        kfree(ringbuf);
-                       i915_gem_object_ggtt_unpin(ctx_obj);
                        drm_gem_object_unreference(&ctx_obj->base);
                }
        }
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
 {
        int ret = 0;
 
-       WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+       WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
 
        switch (ring->id) {
        case RCS:
-               ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+               if (INTEL_INFO(ring->dev)->gen >= 9)
+                       ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+               else
+                       ret = GEN8_LR_CONTEXT_RENDER_SIZE;
                break;
        case VCS:
        case BCS:
@@ -1649,6 +1794,23 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
        return ret;
 }
 
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+               struct drm_i915_gem_object *default_ctx_obj)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       /* The status page is offset 0 from the default context object
+        * in LRC mode. */
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
+       ring->status_page.page_addr =
+                       kmap(sg_page(default_ctx_obj->pages->sgl));
+       ring->status_page.obj = default_ctx_obj;
+
+       I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                       (u32)ring->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
 /**
  * intel_lr_context_deferred_create() - create the LRC specific bits of a context
  * @ctx: LR context to create.
@@ -1660,11 +1822,12 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
  * the creation is a deferred call: it's better to make sure first that we need to use
  * a given ring with the context.
  *
- * Return: non-zero on eror.
+ * Return: non-zero on error.
  */
 int intel_lr_context_deferred_create(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
 {
+       const bool is_global_default_ctx = (ctx == ring->default_context);
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
@@ -1684,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
                return ret;
        }
 
-       ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
-               drm_gem_object_unreference(&ctx_obj->base);
-               return ret;
+       if (is_global_default_ctx) {
+               ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+               if (ret) {
+                       DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+                                       ret);
+                       drm_gem_object_unreference(&ctx_obj->base);
+                       return ret;
+               }
        }
 
        ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
        if (!ringbuf) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
                                ring->name);
-               i915_gem_object_ggtt_unpin(ctx_obj);
-               drm_gem_object_unreference(&ctx_obj->base);
                ret = -ENOMEM;
-               return ret;
+               goto error_unpin_ctx;
        }
 
        ringbuf->ring = ring;
@@ -1711,46 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
        ringbuf->space = ringbuf->size;
        ringbuf->last_retired_head = -1;
 
-       /* TODO: For now we put this in the mappable region so that we can reuse
-        * the existing ringbuffer code which ioremaps it. When we start
-        * creating many contexts, this will no longer work and we must switch
-        * to a kmapish interface.
-        */
-       ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+       if (ringbuf->obj == NULL) {
+               ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+               if (ret) {
+                       DRM_DEBUG_DRIVER(
+                               "Failed to allocate ringbuffer obj %s: %d\n",
                                ring->name, ret);
-               goto error;
+                       goto error_free_rbuf;
+               }
+
+               if (is_global_default_ctx) {
+                       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+                       if (ret) {
+                               DRM_ERROR(
+                                       "Failed to pin and map ringbuffer %s: %d\n",
+                                       ring->name, ret);
+                               goto error_destroy_rbuf;
+                       }
+               }
+
        }
 
        ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-               intel_destroy_ringbuffer_obj(ringbuf);
                goto error;
        }
 
        ctx->engine[ring->id].ringbuf = ringbuf;
        ctx->engine[ring->id].state = ctx_obj;
 
-       if (ctx == ring->default_context) {
-               /* The status page is offset 0 from the default context object
-                * in LRC mode. */
-               ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
-               ring->status_page.page_addr =
-                               kmap(sg_page(ctx_obj->pages->sgl));
-               if (ring->status_page.page_addr == NULL)
-                       return -ENOMEM;
-               ring->status_page.obj = ctx_obj;
-       }
+       if (ctx == ring->default_context)
+               lrc_setup_hardware_status_page(ring, ctx_obj);
 
        if (ring->id == RCS && !ctx->rcs_initialized) {
+               if (ring->init_context) {
+                       ret = ring->init_context(ring, ctx);
+                       if (ret)
+                               DRM_ERROR("ring init context: %d\n", ret);
+               }
+
                ret = intel_lr_context_render_state_init(ring, ctx);
                if (ret) {
                        DRM_ERROR("Init render state failed: %d\n", ret);
                        ctx->engine[ring->id].ringbuf = NULL;
                        ctx->engine[ring->id].state = NULL;
-                       intel_destroy_ringbuffer_obj(ringbuf);
                        goto error;
                }
                ctx->rcs_initialized = true;
@@ -1759,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
        return 0;
 
 error:
+       if (is_global_default_ctx)
+               intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+       intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
        kfree(ringbuf);
-       i915_gem_object_ggtt_unpin(ctx_obj);
+error_unpin_ctx:
+       if (is_global_default_ctx)
+               i915_gem_object_ggtt_unpin(ctx_obj);
        drm_gem_object_unreference(&ctx_obj->base);
        return ret;
 }
index 33c3b4bf28c56322c77235575d12fd83532f947b..14b216b9be7f2fb5d33dd01c664e4e96d705d6bb 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _INTEL_LRC_H_
 #define _INTEL_LRC_H_
 
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
 /* Execlists regs */
 #define RING_ELSP(ring)                        ((ring)->mmio_base+0x230)
 #define RING_EXECLIST_STATUS(ring)     ((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+               struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
        u32 tail;
 
        struct list_head execlist_link;
-       struct work_struct work;
 
        int elsp_submitted;
 };
 
 void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
 #endif /* _INTEL_LRC_H_ */
index a6bd1422e38fd33fccc19f2fd78684291c0f54d3..14654d628ca42ad5fc3bd00e4ee56196c254ebf2 100644 (file)
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(lvds_encoder->reg);
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
        int pipe;
        u8 pin;
 
+       /*
+        * Unlock registers and just leave them unlocked. Do this before
+        * checking quirk lists to avoid bogus WARNINGs.
+        */
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PCH_PP_CONTROL,
+                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+       } else {
+               I915_WRITE(PP_CONTROL,
+                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+       }
        if (!intel_lvds_supported(dev))
                return;
 
@@ -1097,17 +1108,6 @@ out:
        lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
                                 LVDS_A3_POWER_MASK;
 
-       /*
-        * Unlock registers and just
-        * leave them unlocked
-        */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_CONTROL,
-                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else {
-               I915_WRITE(PP_CONTROL,
-                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-       }
        lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
@@ -1116,7 +1116,7 @@ out:
        drm_connector_register(connector);
 
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
-       intel_panel_setup_backlight(connector);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
 
index 0e018cb49147367f1fa1eacd5af6ed082ca29680..4d63839bd9b4c53be99842c38188331f8e7817c7 100644 (file)
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return 0;
+
        return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
@@ -536,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val;
-       unsigned long flags;
+       struct intel_panel *panel = &connector->panel;
+       u32 val = 0;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
-       val = dev_priv->display.get_backlight(connector);
-       val = intel_panel_compute_brightness(connector, val);
+       if (panel->backlight.enabled) {
+               val = dev_priv->display.get_backlight(connector);
+               val = intel_panel_compute_brightness(connector, val);
+       }
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
        return val;
@@ -603,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 tmp;
 
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return;
+
        tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
 }
@@ -626,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
-       enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
-       unsigned long flags;
 
-       if (!panel->backlight.present || pipe == INVALID_PIPE)
+       if (!panel->backlight.present)
                return;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -643,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 /* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
-       unsigned long flags;
 
+       /*
+        * INVALID_PIPE may occur during driver init because
+        * connection_mutex isn't held across the entire backlight
+        * setup + modeset readout, and the BIOS can issue the
+        * requests at any time.
+        */
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -678,7 +689,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 static void pch_disable_backlight(struct intel_connector *connector)
@@ -720,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 tmp;
 
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return;
+
        intel_panel_actually_set_backlight(connector, 0);
 
        tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -731,10 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
-       enum pipe pipe = intel_get_pipe_from_connector(connector);
-       unsigned long flags;
 
-       if (!panel->backlight.present || pipe == INVALID_PIPE)
+       if (!panel->backlight.present)
                return;
 
        /*
@@ -748,14 +760,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
                return;
        }
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
        panel->backlight.enabled = false;
        dev_priv->display.disable_backlight(connector);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 static void bdw_enable_backlight(struct intel_connector *connector)
@@ -779,8 +791,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
        if (panel->backlight.active_low_pwm)
                pch_ctl1 |= BLM_PCH_POLARITY;
 
-       /* BDW always uses the pch pwm controls. */
-       pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+       /* After LPT, override is the default. */
+       if (HAS_PCH_LPT(dev_priv))
+               pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
 
        I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
        POSTING_READ(BLC_PWM_PCH_CTL1);
@@ -909,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 ctl, ctl2;
 
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return;
+
        ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
        if (ctl2 & BLM_PWM_ENABLE) {
                DRM_DEBUG_KMS("backlight already enabled\n");
@@ -936,14 +952,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
-       unsigned long flags;
 
-       if (!panel->backlight.present || pipe == INVALID_PIPE)
+       if (!panel->backlight.present)
                return;
 
        DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -961,7 +976,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_UNBLANK;
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1030,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
        if (WARN_ON(panel->backlight.device))
                return -ENODEV;
 
+       if (!panel->backlight.present)
+               return 0;
+
        WARN_ON(panel->backlight.max == 0);
 
        memset(&props, 0, sizeof(props));
@@ -1065,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
                panel->backlight.device = NULL;
                return -ENODEV;
        }
+
+       DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+                     connector->base.name);
+
        return 0;
 }
 
@@ -1098,15 +1120,28 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
+       int min;
 
        WARN_ON(panel->backlight.max == 0);
 
+       /*
+        * XXX: If the vbt value is 255, it makes min equal to max, which leads
+        * to problems. There are such machines out there. Either our
+        * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+        * against this by letting the minimum be at most (arbitrarily chosen)
+        * 25% of the max.
+        */
+       min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+       if (min != dev_priv->vbt.backlight.min_brightness) {
+               DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+                             dev_priv->vbt.backlight.min_brightness, min);
+       }
+
        /* vbt value is a coefficient in range [0..255] */
-       return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
-                    0, panel->backlight.max);
+       return scale(min, 0, 255, 0, panel->backlight.max);
 }
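A worked example of the clamp above, assuming scale() maps [0..255]
linearly onto [0..panel->backlight.max] and an assumed PWM maximum of
7812:

        /* Illustrative numbers only (7812 is an assumed max):
         * without the clamp: scale(255) == 7812  -> min == max, broken
         * with the clamp:    scale(64)  ~= 1960  -> min capped near 25% */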
 
-static int bdw_setup_backlight(struct intel_connector *connector)
+static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1132,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
        return 0;
 }
 
-static int pch_setup_backlight(struct intel_connector *connector)
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1159,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
        return 0;
 }
 
-static int i9xx_setup_backlight(struct intel_connector *connector)
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1191,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
        return 0;
 }
 
-static int i965_setup_backlight(struct intel_connector *connector)
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1221,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
        return 0;
 }
 
-static int vlv_setup_backlight(struct intel_connector *connector)
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
-       enum pipe pipe;
+       enum pipe p;
        u32 ctl, ctl2, val;
 
-       for_each_pipe(dev_priv, pipe) {
-               u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+       for_each_pipe(dev_priv, p) {
+               u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
 
                /* Skip if the modulation freq is already set */
                if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
                        continue;
 
                cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-               I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+               I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
                           cur_val);
        }
 
-       ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+       if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+               return -ENODEV;
+
+       ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
        panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
-       ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+       ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
        panel->backlight.max = ctl >> 16;
        if (!panel->backlight.max)
                return -ENODEV;
 
        panel->backlight.min = get_backlight_min_vbt(connector);
 
-       val = _vlv_get_backlight(dev, PIPE_A);
+       val = _vlv_get_backlight(dev, pipe);
        panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
        panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1260,13 +1298,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
        return 0;
 }
 
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_panel *panel = &intel_connector->panel;
-       unsigned long flags;
        int ret;
 
        if (!dev_priv->vbt.backlight.present) {
@@ -1279,9 +1316,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        }
 
        /* set level and max in panel struct */
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
-       ret = dev_priv->display.setup_backlight(intel_connector);
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
+       ret = dev_priv->display.setup_backlight(intel_connector, pipe);
+       mutex_unlock(&dev_priv->backlight_lock);
 
        if (ret) {
                DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1289,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
                return ret;
        }
 
-       intel_backlight_device_register(intel_connector);
-
        panel->backlight.present = true;
 
-       DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
-                     "sysfs interface %sregistered\n",
+       DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+                     connector->name,
                      panel->backlight.enabled ? "enabled" : "disabled",
-                     panel->backlight.level, panel->backlight.max,
-                     panel->backlight.device ? "" : "not ");
+                     panel->backlight.level, panel->backlight.max);
 
        return 0;
 }
@@ -1308,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
        struct intel_panel *panel = &intel_connector->panel;
 
        panel->backlight.present = false;
-       intel_backlight_device_unregister(intel_connector);
 }
 
 /* Set up chip specific backlight functions */
@@ -1316,7 +1349,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
                dev_priv->display.setup_backlight = bdw_setup_backlight;
                dev_priv->display.enable_backlight = bdw_enable_backlight;
                dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -1371,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
                drm_mode_destroy(intel_connector->base.dev,
                                panel->downclock_mode);
 }
+
+void intel_backlight_register(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+               intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+               intel_backlight_device_unregister(connector);
+}
index c27b6140bfd10e3912006f994da2bb75090c6b82..9af0af49382e6b88f7101cf19a48768cd5848aad 100644 (file)
@@ -30,9 +30,6 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
-#include <linux/vgaarb.h>
-#include <drm/i915_powerwell.h>
-#include <linux/pm_runtime.h>
 
 /**
  * RC6 is a special power stage which allows the GPU to enter a very
  * i915.i915_enable_fbc parameter
  */
 
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * WaDisableSDEUnitClockGating:skl
+        * This seems to be a pre-production w/a.
+        */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+       /*
+        * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+        * This is a pre-production w/a.
+        */
+       I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+                  I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+                  ~GEN9_DG_MIRROR_FIX_ENABLE);
+
+       /* Wa4x4STCOptimizationDisable:skl */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
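
The Wa4x4STCOptimizationDisable write above goes through _MASKED_BIT_ENABLE() because CACHE_MODE_1 is a masked register: the high 16 bits tell the hardware which of the low 16 bits to latch, so single bits can be set without a read-modify-write. A rough standalone sketch of that convention (the bit position used here is a stand-in, not the real GEN8_4x4_STC_OPTIMIZATION_DISABLE value):

#include <stdio.h>

/* Masked-register idiom: high half selects which low-half bits to latch. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

int main(void)
{
	unsigned bit = 1u << 6; /* stand-in bit position */

	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit));
	return 0;
}
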
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
        int i;
        u32 fbc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!dev_priv->display.fbc_enabled)
-               return false;
-
-       return dev_priv->display.fbc_enabled(dev);
+       return dev_priv->fbc.enabled;
 }
 
-void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (!IS_GEN8(dev))
                return;
 
+       if (!intel_fbc_enabled(dev))
+               return;
+
        I915_WRITE(MSG_FBC_REND_STATE, value);
 }
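
The fbc hunks above and below replace the per-platform fbc_enabled() register probes with a software-tracked dev_priv->fbc.enabled flag that every enable/disable path now sets, so intel_fbc_enabled() becomes a plain field read and bdw_fbc_sw_flush() can bail out early without touching hardware. A toy sketch of the pattern (names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative pattern only: update state at the enable/disable choke
 * points instead of probing a hardware register on every query. */
struct fbc_state { bool enabled; };

static void fbc_enable(struct fbc_state *fbc)  { fbc->enabled = true;  }
static void fbc_disable(struct fbc_state *fbc) { fbc->enabled = false; }
static bool fbc_is_enabled(const struct fbc_state *fbc) { return fbc->enabled; }

int main(void)
{
	struct fbc_state fbc = { false };

	fbc_enable(&fbc);
	printf("fbc enabled: %d\n", fbc_is_enabled(&fbc));
	fbc_disable(&fbc);
	printf("fbc enabled: %d\n", fbc_is_enabled(&fbc));
	return 0;
}
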
 
@@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
                                      int *prec_mult,
                                      int *drain_latency)
 {
+       struct drm_device *dev = crtc->dev;
        int entries;
        int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
@@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
                return false;
 
        entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
-       *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
-                                      DRAIN_LATENCY_PRECISION_32;
+       if (IS_CHERRYVIEW(dev))
+               *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
+                                              DRAIN_LATENCY_PRECISION_16;
+       else
+               *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
+                                              DRAIN_LATENCY_PRECISION_32;
        *drain_latency = (64 * (*prec_mult) * 4) / entries;
 
        if (*drain_latency > DRAIN_LATENCY_MASK)
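
Worked example for the precision selection and drain-latency formula above, using a 148,500 kHz pixel clock at 4 bytes per pixel (example values, not from the patch): entries = ceil(148500 / 1000) * 4 = 596, which exceeds 128, so VLV picks the 64 multiplier and gets (64 * 64 * 4) / 596 = 27. A standalone check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Example values, not from the patch. */
	int clock = 148500; /* pixel clock in kHz */
	int pixel_size = 4; /* bytes per pixel */

	int entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	/* VLV precision pick from the hunk above: 64 when entries > 128 */
	int prec_mult = (entries > 128) ? 64 : 32;
	int drain_latency = (64 * prec_mult * 4) / entries;

	printf("entries=%d prec_mult=%d drain_latency=%d\n",
	       entries, prec_mult, drain_latency);
	return 0;
}
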
@@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
 
 static void vlv_update_drain_latency(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pixel_size;
        int drain_latency;
        enum pipe pipe = intel_crtc->pipe;
        int plane_prec, prec_mult, plane_dl;
+       const int high_precision = IS_CHERRYVIEW(dev) ?
+               DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
 
-       plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
-                  DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
+       plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
+                  DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
                   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
 
        if (!intel_crtc_active(crtc)) {
@@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
        /* Primary plane Drain Latency */
        pixel_size = crtc->primary->fb->bits_per_pixel / 8;     /* BPP */
        if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
-               plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
-                                          DDL_PLANE_PRECISION_64 :
-                                          DDL_PLANE_PRECISION_32;
+               plane_prec = (prec_mult == high_precision) ?
+                                          DDL_PLANE_PRECISION_HIGH :
+                                          DDL_PLANE_PRECISION_LOW;
                plane_dl |= plane_prec | drain_latency;
        }
 
@@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
        /* Program cursor DL only if it is enabled */
        if (intel_crtc->cursor_base &&
            vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
-               plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
-                                          DDL_CURSOR_PRECISION_64 :
-                                          DDL_CURSOR_PRECISION_32;
+               plane_prec = (prec_mult == high_precision) ?
+                                          DDL_CURSOR_PRECISION_HIGH :
+                                          DDL_CURSOR_PRECISION_LOW;
                plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
        }
 
@@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
        int plane_prec;
        int sprite_dl;
        int prec_mult;
+       const int high_precision = IS_CHERRYVIEW(dev) ?
+               DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
 
-       sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
+       sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
                    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
 
        if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
                                                 &drain_latency)) {
-               plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
-                                          DDL_SPRITE_PRECISION_64(sprite) :
-                                          DDL_SPRITE_PRECISION_32(sprite);
+               plane_prec = (prec_mult == high_precision) ?
+                                          DDL_SPRITE_PRECISION_HIGH(sprite) :
+                                          DDL_SPRITE_PRECISION_LOW(sprite);
                sprite_dl |= plane_prec |
                             (drain_latency << DDL_SPRITE_SHIFT(sprite));
        }
@@ -1915,6 +1960,14 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
        return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
 }
 
+struct skl_pipe_wm_parameters {
+       bool active;
+       uint32_t pipe_htotal;
+       uint32_t pixel_rate; /* in KHz */
+       struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
+       struct intel_plane_wm_parameters cursor;
+};
+
 struct ilk_pipe_wm_parameters {
        bool active;
        uint32_t pipe_htotal;
@@ -2226,11 +2279,82 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
               PIPE_WM_LINETIME_TIME(linetime);
 }
 
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_GEN9(dev)) {
+               uint32_t val;
+               int ret, i;
+               int level, max_level = ilk_wm_max_level(dev);
+
+               /* read the first set of memory latencies[0:3] */
+               val = 0; /* data0 to be programmed to 0 for first set */
+               mutex_lock(&dev_priv->rps.hw_lock);
+               ret = sandybridge_pcode_read(dev_priv,
+                                            GEN9_PCODE_READ_MEM_LATENCY,
+                                            &val);
+               mutex_unlock(&dev_priv->rps.hw_lock);
+
+               if (ret) {
+                       DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+                       return;
+               }
+
+               wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+
+               /* read the second set of memory latencies[4:7] */
+               val = 1; /* data0 to be programmed to 1 for second set */
+               mutex_lock(&dev_priv->rps.hw_lock);
+               ret = sandybridge_pcode_read(dev_priv,
+                                            GEN9_PCODE_READ_MEM_LATENCY,
+                                            &val);
+               mutex_unlock(&dev_priv->rps.hw_lock);
+               if (ret) {
+                       DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+                       return;
+               }
+
+               wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+               wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+                               GEN9_MEM_LATENCY_LEVEL_MASK;
+
+               /*
+                * punit doesn't take into account the read latency so we need
+                * to add 2us to the various latency levels we retrieve from
+                * the punit.
+                *   - W0 is a bit special in that it's the only level that
+                *   can't be disabled if we want to have display working, so
+                *   we always add 2us there.
+                *   - For levels >=1, punit returns 0us latency when they are
+                *   disabled, so we respect that and don't add 2us then
+                *
+                * Additionally, if a level n (n > 1) has a 0us latency, all
+                * levels m (m >= n) need to be disabled. We make sure to
+                * sanitize the values out of the punit to satisfy this
+                * requirement.
+                */
+               wm[0] += 2;
+               for (level = 1; level <= max_level; level++)
+                       if (wm[level] != 0)
+                               wm[level] += 2;
+                       else {
+                               for (i = level + 1; i <= max_level; i++)
+                                       wm[i] = 0;
+
+                               break;
+                       }
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
                wm[0] = (sskpd >> 56) & 0xFF;
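
The +2us adjustment and the sanitize rule in the gen9 branch above are easiest to see with numbers: if the punit reported {2, 4, 6, 0, 10, 12, 14, 16}, level 3 reads as disabled, so levels 3..7 must all end up zero while levels 0..2 gain 2us. A standalone version of that loop (input values invented):

#include <stdio.h>

int main(void)
{
	/* Example punit readings, not real hardware values. */
	unsigned wm[8] = { 2, 4, 6, 0, 10, 12, 14, 16 };
	int max_level = 7, level, i;

	wm[0] += 2; /* WM0 can't be disabled, so it always pays the read latency */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] != 0) {
			wm[level] += 2;
		} else {
			/* a disabled level disables every higher level too */
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;
			break;
		}
	}

	for (level = 0; level <= max_level; level++)
		printf("WM%d: %uus\n", level, wm[level]);
	return 0;
}
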
@@ -2278,7 +2402,9 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 int ilk_wm_max_level(const struct drm_device *dev)
 {
        /* how many WM levels are we expecting */
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_GEN9(dev))
+               return 7;
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                return 4;
        else if (INTEL_INFO(dev)->gen >= 6)
                return 3;
@@ -2288,7 +2414,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
 
 static void intel_print_wm_latency(struct drm_device *dev,
                                   const char *name,
-                                  const uint16_t wm[5])
+                                  const uint16_t wm[8])
 {
        int level, max_level = ilk_wm_max_level(dev);
 
@@ -2301,8 +2427,13 @@ static void intel_print_wm_latency(struct drm_device *dev,
                        continue;
                }
 
-               /* WM1+ latency values in 0.5us units */
-               if (level > 0)
+               /*
+                * - latencies are in us on gen9.
+                * - before then, WM1+ latency values are in 0.5us units
+                */
+               if (IS_GEN9(dev))
+                       latency *= 10;
+               else if (level > 0)
                        latency *= 5;
 
                DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
@@ -2370,6 +2501,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
                snb_wm_latency_quirk(dev);
 }
 
+static void skl_setup_wm_latency(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+       intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+}
+
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
                                      struct ilk_pipe_wm_parameters *p)
 {
@@ -2860,4342 +2999,4077 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
        return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-static void ilk_update_wm(struct drm_crtc *crtc)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct ilk_wm_maximums max;
-       struct ilk_pipe_wm_parameters params = {};
-       struct ilk_wm_values results = {};
-       enum intel_ddb_partitioning partitioning;
-       struct intel_pipe_wm pipe_wm = {};
-       struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
-       struct intel_wm_config config = {};
+/*
+ * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
+ * different active planes.
+ */
 
-       ilk_compute_wm_parameters(crtc, &params);
+#define SKL_DDB_SIZE           896     /* in blocks */
 
-       intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+static void
+skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+                                  struct drm_crtc *for_crtc,
+                                  const struct intel_wm_config *config,
+                                  const struct skl_pipe_wm_parameters *params,
+                                  struct skl_ddb_entry *alloc /* out */)
+{
+       struct drm_crtc *crtc;
+       unsigned int pipe_size, ddb_size;
+       int nth_active_pipe;
 
-       if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+       if (!params->active) {
+               alloc->start = 0;
+               alloc->end = 0;
                return;
+       }
 
-       intel_crtc->wm.active = pipe_wm;
+       ddb_size = SKL_DDB_SIZE;
 
-       ilk_compute_wm_config(dev, &config);
+       ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
-       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-       ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+       nth_active_pipe = 0;
+       for_each_crtc(dev, crtc) {
+               if (!intel_crtc_active(crtc))
+                       continue;
 
-       /* 5/6 split only in single pipe config on IVB+ */
-       if (INTEL_INFO(dev)->gen >= 7 &&
-           config.num_pipes_active == 1 && config.sprites_enabled) {
-               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-               ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+               if (crtc == for_crtc)
+                       break;
 
-               best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
-       } else {
-               best_lp_wm = &lp_wm_1_2;
+               nth_active_pipe++;
        }
 
-       partitioning = (best_lp_wm == &lp_wm_1_2) ?
-                      INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+       pipe_size = ddb_size / config->num_pipes_active;
+       alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+       alloc->end = alloc->start + pipe_size;
+}
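
With SKL_DDB_SIZE = 896 and 4 blocks reserved for the bypass path, the helper above hands every active pipe an equal slice of the remaining 892 blocks, e.g. [0, 446) and [446, 892) with two pipes active. A standalone version of the split:

#include <stdio.h>

#define SKL_DDB_SIZE 896 /* in blocks, from the hunk above */

int main(void)
{
	int ddb_size = SKL_DDB_SIZE - 4; /* 4 blocks for the bypass path */
	int num_active = 2, nth;

	for (nth = 0; nth < num_active; nth++) {
		int pipe_size = ddb_size / num_active;
		int start = nth * ddb_size / num_active;

		printf("pipe %d: [%d, %d)\n", nth, start, start + pipe_size);
	}
	return 0;
}
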
 
-       ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+{
+       if (config->num_pipes_active == 1)
+               return 32;
 
-       ilk_write_wm_values(dev_priv, &results);
+       return 8;
 }
 
-static void
-ilk_update_sprite_wm(struct drm_plane *plane,
-                    struct drm_crtc *crtc,
-                    uint32_t sprite_width, uint32_t sprite_height,
-                    int pixel_size, bool enabled, bool scaled)
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
 {
-       struct drm_device *dev = plane->dev;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
+       entry->start = reg & 0x3ff;
+       entry->end = (reg >> 16) & 0x3ff;
+       if (entry->end)
+               entry->end += 1;
+}
 
-       intel_plane->wm.enabled = enabled;
-       intel_plane->wm.scaled = scaled;
-       intel_plane->wm.horiz_pixels = sprite_width;
-       intel_plane->wm.vert_pixels = sprite_width;
-       intel_plane->wm.bytes_per_pixel = pixel_size;
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+                         struct skl_ddb_allocation *ddb /* out */)
+{
+       struct drm_device *dev = dev_priv->dev;
+       enum pipe pipe;
+       int plane;
+       u32 val;
 
-       /*
-        * IVB workaround: must disable low power watermarks for at least
-        * one frame before enabling scaling.  LP watermarks can be re-enabled
-        * when scaling is disabled.
-        *
-        * WaCxSRDisabledForSpriteScaling:ivb
-        */
-       if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
-               intel_wait_for_vblank(dev, intel_plane->pipe);
+       for_each_pipe(dev_priv, pipe) {
+               for_each_plane(pipe, plane) {
+                       val = I915_READ(PLANE_BUF_CFG(pipe, plane));
+                       skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
+                                                  val);
+               }
 
-       ilk_update_wm(crtc);
+               val = I915_READ(CUR_BUF_CFG(pipe));
+               skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
+       }
 }
 
-static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+static unsigned int
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
+{
+       return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+}
+
+/*
+ * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
+ * an 8192x4096@32bpp framebuffer:
+ *   3 * 4096 * 8192 * 4 < 2^32
+ */
+static unsigned int
+skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
+                                const struct skl_pipe_wm_parameters *params)
+{
+       unsigned int total_data_rate = 0;
+       int plane;
+
+       for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+               const struct intel_plane_wm_parameters *p;
+
+               p = &params->plane[plane];
+               if (!p->enabled)
+                       continue;
+
+               total_data_rate += skl_plane_relative_data_rate(p);
+       }
+
+       return total_data_rate;
+}
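
The overflow bound claimed in the comment above checks out: 3 * 8192 * 4096 * 4 = 402,653,184, roughly a tenth of 2^32, so the 32-bit accumulator is safe. A one-liner to verify:

#include <stdio.h>

int main(void)
{
	unsigned long long worst = 3ull * 8192 * 4096 * 4;

	printf("%llu < 2^32 (%llu): %d\n",
	       worst, 1ull << 32, worst < (1ull << 32));
	return 0;
}
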
+
+static void
+skl_allocate_pipe_ddb(struct drm_crtc *crtc,
+                     const struct intel_wm_config *config,
+                     const struct skl_pipe_wm_parameters *params,
+                     struct skl_ddb_allocation *ddb /* out */)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_pipe_wm *active = &intel_crtc->wm.active;
        enum pipe pipe = intel_crtc->pipe;
-       static const unsigned int wm0_pipe_reg[] = {
-               [PIPE_A] = WM0_PIPEA_ILK,
-               [PIPE_B] = WM0_PIPEB_ILK,
-               [PIPE_C] = WM0_PIPEC_IVB,
-       };
+       struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+       uint16_t alloc_size, start, cursor_blocks;
+       unsigned int total_data_rate;
+       int plane;
+
+       skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+       alloc_size = skl_ddb_entry_size(alloc);
+       if (alloc_size == 0) {
+               memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+               memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
+               return;
+       }
 
-       hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+       cursor_blocks = skl_cursor_allocation(config);
+       ddb->cursor[pipe].start = alloc->end - cursor_blocks;
+       ddb->cursor[pipe].end = alloc->end;
 
-       active->pipe_enabled = intel_crtc_active(crtc);
+       alloc_size -= cursor_blocks;
+       alloc->end -= cursor_blocks;
 
-       if (active->pipe_enabled) {
-               u32 tmp = hw->wm_pipe[pipe];
+       /*
+        * Each active plane gets a portion of the remaining space, in
+        * proportion to the amount of data it needs to fetch from memory.
+        *
+        * FIXME: we may not allocate every single block here.
+        */
+       total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
 
-               /*
-                * For active pipes LP0 watermark is marked as
-                * enabled, and LP1+ watermaks as disabled since
-                * we can't really reverse compute them in case
-                * multiple pipes are active.
-                */
-               active->wm[0].enable = true;
-               active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
-               active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
-               active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
-               active->linetime = hw->wm_linetime[pipe];
-       } else {
-               int level, max_level = ilk_wm_max_level(dev);
+       start = alloc->start;
+       for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+               const struct intel_plane_wm_parameters *p;
+               unsigned int data_rate;
+               uint16_t plane_blocks;
+
+               p = &params->plane[plane];
+               if (!p->enabled)
+                       continue;
+
+               data_rate = skl_plane_relative_data_rate(p);
 
                /*
-                * For inactive pipes, all watermark levels
-                * should be marked as enabled but zeroed,
-                * which is what we'd compute them to.
+                * promote the expression to 64 bits to avoid overflow; the
+                * result is below alloc_size since data_rate / total_data_rate < 1
                 */
-               for (level = 0; level <= max_level; level++)
-                       active->wm[level].enable = true;
+               plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
+                                      total_data_rate);
+
+               ddb->plane[pipe][plane].start = start;
+               ddb->plane[pipe][plane].end = start + plane_blocks;
+
+               start += plane_blocks;
        }
+
 }
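
Continuing the two-pipe example: after the 8 cursor blocks are carved off the 446-block slice (skl_cursor_allocation() above returns 8 when more than one pipe is active), each plane gets alloc_size * data_rate / total_data_rate blocks, computed in 64 bits. A sketch with one 4 bpp primary and one 2 bpp sprite, both 1920x1080 (example numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned alloc_size = 446 - 8; /* 2-pipe slice minus 8 cursor blocks */
	/* Example relative data rates: 1920x1080 at 4 bpp and at 2 bpp. */
	unsigned rate[2] = { 1920u * 1080 * 4, 1920u * 1080 * 2 };
	unsigned total = rate[0] + rate[1];
	unsigned start = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned blocks = (unsigned)(((uint64_t)alloc_size * rate[i]) / total);

		printf("plane %d: [%u, %u)\n", i, start, start + blocks);
		start += blocks;
	}
	return 0;
}
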
 
-void ilk_wm_get_hw_state(struct drm_device *dev)
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+{
+       /* TODO: Take into account the scalers once we support them */
+       return config->adjusted_mode.crtc_clock;
+}
+
+/*
+ * The max latency should be 257 (max the punit can code is 255 and we add 2us
+ * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * should allow pixel_rate up to ~2 GHz which seems sufficient since max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+                              uint32_t latency)
+{
+       uint32_t wm_intermediate_val, ret;
+
+       if (latency == 0)
+               return UINT_MAX;
+
+       wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+       ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+
+       return ret;
+}
+
+static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+                              uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+                              uint32_t latency)
+{
+       uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+
+       if (latency == 0)
+               return UINT_MAX;
+
+       plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+       wm_intermediate_val = latency * pixel_rate;
+       ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
+                               plane_bytes_per_line;
+
+       return ret;
+}
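
Plugging example numbers into the two methods above (148,500 kHz pixel rate, 2,200 htotal, 1,920 visible pixels, 4 bytes per pixel, 6us latency): method 1 charges the latency against the raw byte rate and yields 3,564 bytes, while method 2 rounds the latency up to one full line and yields 7,680 bytes. skl_compute_plane_wm() further below takes the smaller one when at least a full line fits in the DDB share, converts it to blocks and lines, and rejects the level if it needs more blocks than the plane's allocation or more than 31 lines. A standalone walk-through (values are examples, not from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Example mode, not from the patch. */
	unsigned pixel_rate = 148500; /* kHz */
	unsigned htotal = 2200, width = 1920, bpp = 4, latency = 6; /* us */

	unsigned method1 = DIV_ROUND_UP(latency * pixel_rate * bpp, 1000u);
	unsigned bytes_per_line = width * bpp;
	unsigned method2 = DIV_ROUND_UP(latency * pixel_rate, htotal * 1000u) *
			   bytes_per_line;
	/* assuming at least one full line fits the DDB share, take the min */
	unsigned result = method1 < method2 ? method1 : method2;
	unsigned res_blocks = DIV_ROUND_UP(result, 512u) + 1;
	unsigned res_lines = DIV_ROUND_UP(result, bytes_per_line);

	printf("method1=%u method2=%u -> %u blocks, %u lines\n",
	       method1, method2, res_blocks, res_lines);
	return 0;
}
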
+
+static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
+                                      const struct intel_crtc *intel_crtc)
 {
+       struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct ilk_wm_values *hw = &dev_priv->wm.hw;
+       const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+       enum pipe pipe = intel_crtc->pipe;
+
+       if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
+                  sizeof(new_ddb->plane[pipe])))
+               return true;
+
+       if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
+                   sizeof(new_ddb->cursor[pipe])))
+               return true;
+
+       return false;
+}
+
+static void skl_compute_wm_global_parameters(struct drm_device *dev,
+                                            struct intel_wm_config *config)
+{
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
 
-       for_each_crtc(dev, crtc)
-               ilk_pipe_wm_get_hw_state(crtc);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               config->num_pipes_active += intel_crtc_active(crtc);
 
-       hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
-       hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
-       hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+       /* FIXME: I don't think we need those two global parameters on SKL */
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               struct intel_plane *intel_plane = to_intel_plane(plane);
 
-       hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-       if (INTEL_INFO(dev)->gen >= 7) {
-               hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-               hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+               config->sprites_enabled |= intel_plane->wm.enabled;
+               config->sprites_scaled |= intel_plane->wm.scaled;
        }
+}
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-       else if (IS_IVYBRIDGE(dev))
-               hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
-                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
+                                          struct skl_pipe_wm_parameters *p)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       struct drm_plane *plane;
+       int i = 1; /* index of the first sprite plane */
 
-       hw->enable_fbc_wm =
-               !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-}
+       p->active = intel_crtc_active(crtc);
+       if (p->active) {
+               p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+               p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
 
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- *   - normal (i.e. non-self-refresh)
- *   - self-refresh (SR) mode
- *   - lines are large relative to FIFO size (buffer can hold up to 2)
- *   - lines are small relative to FIFO size (buffer can hold more than 2
- *     lines), so need to account for TLB latency
- *
- *   The normal calculation is:
- *     watermark = dotclock * bytes per pixel * latency
- *   where latency is platform & configuration dependent (we assume pessimal
- *   values here).
- *
- *   The SR calculation is:
- *     watermark = (trunc(latency/line time)+1) * surface width *
- *       bytes per pixel
- *   where
- *     line time = htotal / dotclock
- *     surface width = hdisplay for normal plane and 64 for cursor
- *   and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that.  And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct drm_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+               /*
+                * For now, assume primary and cursor planes are always enabled.
+                */
+               p->plane[0].enabled = true;
+               p->plane[0].bytes_per_pixel =
+                       crtc->primary->fb->bits_per_pixel / 8;
+               p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
+               p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
 
-       if (dev_priv->display.update_wm)
-               dev_priv->display.update_wm(crtc);
-}
+               p->cursor.enabled = true;
+               p->cursor.bytes_per_pixel = 4;
+               p->cursor.horiz_pixels = intel_crtc->cursor_width ?
+                                        intel_crtc->cursor_width : 64;
+       }
 
-void intel_update_sprite_watermarks(struct drm_plane *plane,
-                                   struct drm_crtc *crtc,
-                                   uint32_t sprite_width,
-                                   uint32_t sprite_height,
-                                   int pixel_size,
-                                   bool enabled, bool scaled)
-{
-       struct drm_i915_private *dev_priv = plane->dev->dev_private;
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               struct intel_plane *intel_plane = to_intel_plane(plane);
 
-       if (dev_priv->display.update_sprite_wm)
-               dev_priv->display.update_sprite_wm(plane, crtc,
-                                                  sprite_width, sprite_height,
-                                                  pixel_size, enabled, scaled);
+               if (intel_plane->pipe == pipe)
+                       p->plane[i++] = intel_plane->wm;
+       }
 }
 
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
+static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+                                struct intel_plane_wm_parameters *p_params,
+                                uint16_t ddb_allocation,
+                                uint32_t mem_value,
+                                uint16_t *out_blocks, /* out */
+                                uint8_t *out_lines /* out */)
 {
-       struct drm_i915_gem_object *ctx;
-       int ret;
+       uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
+       uint32_t result_bytes;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       if (mem_value == 0 || !p->active || !p_params->enabled)
+               return false;
 
-       ctx = i915_gem_alloc_object(dev, 4096);
-       if (!ctx) {
-               DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
-               return NULL;
-       }
+       method1 = skl_wm_method1(p->pixel_rate,
+                                p_params->bytes_per_pixel,
+                                mem_value);
+       method2 = skl_wm_method2(p->pixel_rate,
+                                p->pipe_htotal,
+                                p_params->horiz_pixels,
+                                p_params->bytes_per_pixel,
+                                mem_value);
 
-       ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
-       if (ret) {
-               DRM_ERROR("failed to pin power context: %d\n", ret);
-               goto err_unref;
-       }
+       plane_bytes_per_line = p_params->horiz_pixels *
+                                       p_params->bytes_per_pixel;
 
-       ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
-       if (ret) {
-               DRM_ERROR("failed to set-domain on power context: %d\n", ret);
-               goto err_unpin;
-       }
+       /* For now xtile and linear */
+       if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
+               result_bytes = min(method1, method2);
+       else
+               result_bytes = method1;
 
-       return ctx;
+       res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
+       res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(ctx);
-err_unref:
-       drm_gem_object_unreference(&ctx->base);
-       return NULL;
-}
+       if (res_blocks > ddb_allocation || res_lines > 31)
+               return false;
 
-/**
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
+       *out_blocks = res_blocks;
+       *out_lines = res_lines;
 
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
+       return true;
+}
 
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+                                struct skl_ddb_allocation *ddb,
+                                struct skl_pipe_wm_parameters *p,
+                                enum pipe pipe,
+                                int level,
+                                int num_planes,
+                                struct skl_wm_level *result)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u16 rgvswctl;
+       uint16_t latency = dev_priv->wm.skl_latency[level];
+       uint16_t ddb_blocks;
+       int i;
 
-       assert_spin_locked(&mchdev_lock);
+       for (i = 0; i < num_planes; i++) {
+               ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
-       rgvswctl = I915_READ16(MEMSWCTL);
-       if (rgvswctl & MEMCTL_CMD_STS) {
-               DRM_DEBUG("gpu busy, RCS change rejected\n");
-               return false; /* still busy with another command */
+               result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+                                               ddb_blocks,
+                                               latency,
+                                               &result->plane_res_b[i],
+                                               &result->plane_res_l[i]);
        }
 
-       rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-               (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-       I915_WRITE16(MEMSWCTL, rgvswctl);
-       POSTING_READ16(MEMSWCTL);
+       ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
+       result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
+                                                latency, &result->cursor_res_b,
+                                                &result->cursor_res_l);
+}
 
-       rgvswctl |= MEMCTL_CMD_STS;
-       I915_WRITE16(MEMSWCTL, rgvswctl);
+static uint32_t
+skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+{
+       if (!intel_crtc_active(crtc))
+               return 0;
+
+       return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 
-       return true;
 }
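
The linetime above is one scanline expressed in eighths of a microsecond: with htotal = 2,200 and a 148,500 kHz pixel rate (example mode), a line takes 2200 / 148.5 MHz, about 14.81us, and DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. 14.875us in 0.125us units. A quick check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned htotal = 2200, pixel_rate = 148500; /* kHz, example mode */
	unsigned linetime = DIV_ROUND_UP(8 * htotal * 1000, pixel_rate);

	printf("linetime = %u eighths of a us (~%u.%03u us)\n",
	       linetime, linetime / 8, (linetime % 8) * 125);
	return 0;
}
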
 
-static void ironlake_enable_drps(struct drm_device *dev)
+static void skl_compute_transition_wm(struct drm_crtc *crtc,
+                                     struct skl_pipe_wm_parameters *params,
+                                     struct skl_wm_level *trans_wm /* out */)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 rgvmodectl = I915_READ(MEMMODECTL);
-       u8 fmax, fmin, fstart, vstart;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int i;
 
-       spin_lock_irq(&mchdev_lock);
+       if (!params->active)
+               return;
 
-       /* Enable temp reporting */
-       I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
-       I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+       /* Until we know more, just disable transition WMs */
+       for (i = 0; i < intel_num_planes(intel_crtc); i++)
+               trans_wm->plane_en[i] = false;
+       trans_wm->cursor_en = false;
+}
 
-       /* 100ms RC evaluation intervals */
-       I915_WRITE(RCUPEI, 100000);
-       I915_WRITE(RCDNEI, 100000);
+static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+                               struct skl_ddb_allocation *ddb,
+                               struct skl_pipe_wm_parameters *params,
+                               struct skl_pipe_wm *pipe_wm)
+{
+       struct drm_device *dev = crtc->dev;
+       const struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int level, max_level = ilk_wm_max_level(dev);
 
-       /* Set max/min thresholds to 90ms and 80ms respectively */
-       I915_WRITE(RCBMAXAVG, 90000);
-       I915_WRITE(RCBMINAVG, 80000);
+       for (level = 0; level <= max_level; level++) {
+               skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
+                                    level, intel_num_planes(intel_crtc),
+                                    &pipe_wm->wm[level]);
+       }
+       pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
 
-       I915_WRITE(MEMIHYST, 1);
+       skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+}
 
-       /* Set up min, max, and cur for interrupt handling */
-       fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
-       fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
-       fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
-               MEMMODE_FSTART_SHIFT;
+static void skl_compute_wm_results(struct drm_device *dev,
+                                  struct skl_pipe_wm_parameters *p,
+                                  struct skl_pipe_wm *p_wm,
+                                  struct skl_wm_values *r,
+                                  struct intel_crtc *intel_crtc)
+{
+       int level, max_level = ilk_wm_max_level(dev);
+       enum pipe pipe = intel_crtc->pipe;
+       uint32_t temp;
+       int i;
 
-       vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
-               PXVFREQ_PX_SHIFT;
+       for (level = 0; level <= max_level; level++) {
+               for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+                       temp = 0;
 
-       dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
-       dev_priv->ips.fstart = fstart;
+                       temp |= p_wm->wm[level].plane_res_l[i] <<
+                                       PLANE_WM_LINES_SHIFT;
+                       temp |= p_wm->wm[level].plane_res_b[i];
+                       if (p_wm->wm[level].plane_en[i])
+                               temp |= PLANE_WM_EN;
 
-       dev_priv->ips.max_delay = fstart;
-       dev_priv->ips.min_delay = fmin;
-       dev_priv->ips.cur_delay = fstart;
+                       r->plane[pipe][i][level] = temp;
+               }
 
-       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
-                        fmax, fmin, fstart);
+               temp = 0;
 
-       I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+               temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
+               temp |= p_wm->wm[level].cursor_res_b;
 
-       /*
-        * Interrupts will be enabled in ironlake_irq_postinstall
-        */
+               if (p_wm->wm[level].cursor_en)
+                       temp |= PLANE_WM_EN;
 
-       I915_WRITE(VIDSTART, vstart);
-       POSTING_READ(VIDSTART);
+               r->cursor[pipe][level] = temp;
 
-       rgvmodectl |= MEMMODE_SWMODE_EN;
-       I915_WRITE(MEMMODECTL, rgvmodectl);
+       }
 
-       if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
-               DRM_ERROR("stuck trying to change perf mode\n");
-       mdelay(1);
+       /* transition WMs */
+       for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+               temp = 0;
+               temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
+               temp |= p_wm->trans_wm.plane_res_b[i];
+               if (p_wm->trans_wm.plane_en[i])
+                       temp |= PLANE_WM_EN;
 
-       ironlake_set_drps(dev, fstart);
+               r->plane_trans[pipe][i] = temp;
+       }
 
-       dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
-               I915_READ(0x112e0);
-       dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
-       dev_priv->ips.last_count2 = I915_READ(0x112f4);
-       dev_priv->ips.last_time2 = ktime_get_raw_ns();
+       temp = 0;
+       temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
+       temp |= p_wm->trans_wm.cursor_res_b;
+       if (p_wm->trans_wm.cursor_en)
+               temp |= PLANE_WM_EN;
 
-       spin_unlock_irq(&mchdev_lock);
+       r->cursor_trans[pipe] = temp;
+
+       r->wm_linetime[pipe] = p_wm->linetime;
 }
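
Each PLANE_WM/CUR_WM value built above packs three fields: the lines result above PLANE_WM_LINES_SHIFT, the blocks result in the low bits, and an enable flag. The sketch below assumes a shift of 14 and the enable bit at position 31; only the three-field layout itself is taken from the hunk, the exact positions are illustrative:

#include <stdio.h>

/* Field positions below are assumptions for illustration; the hunk above
 * only fixes the layout as lines << PLANE_WM_LINES_SHIFT | blocks | EN. */
#define PLANE_WM_EN          (1u << 31)
#define PLANE_WM_LINES_SHIFT 14

static unsigned pack_wm(unsigned lines, unsigned blocks, int enabled)
{
	unsigned temp = 0;

	temp |= lines << PLANE_WM_LINES_SHIFT;
	temp |= blocks;
	if (enabled)
		temp |= PLANE_WM_EN;
	return temp;
}

int main(void)
{
	/* 1 line, 8 blocks, enabled: the numbers from the worked example. */
	printf("PLANE_WM = 0x%08x\n", pack_wm(1, 8, 1));
	return 0;
}
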
 
-static void ironlake_disable_drps(struct drm_device *dev)
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+                               const struct skl_ddb_entry *entry)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u16 rgvswctl;
+       if (entry->end)
+               I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+       else
+               I915_WRITE(reg, 0);
+}
 
-       spin_lock_irq(&mchdev_lock);
+static void skl_write_wm_values(struct drm_i915_private *dev_priv,
+                               const struct skl_wm_values *new)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_crtc *crtc;
 
-       rgvswctl = I915_READ16(MEMSWCTL);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               int i, level, max_level = ilk_wm_max_level(dev);
+               enum pipe pipe = crtc->pipe;
 
-       /* Ack interrupts, disable EFC interrupt */
-       I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
-       I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
-       I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
-       I915_WRITE(DEIIR, DE_PCU_EVENT);
-       I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+               if (!new->dirty[pipe])
+                       continue;
 
-       /* Go back to the starting frequency */
-       ironlake_set_drps(dev, dev_priv->ips.fstart);
-       mdelay(1);
-       rgvswctl |= MEMCTL_CMD_STS;
-       I915_WRITE(MEMSWCTL, rgvswctl);
-       mdelay(1);
+               I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
 
-       spin_unlock_irq(&mchdev_lock);
+               for (level = 0; level <= max_level; level++) {
+                       for (i = 0; i < intel_num_planes(crtc); i++)
+                               I915_WRITE(PLANE_WM(pipe, i, level),
+                                          new->plane[pipe][i][level]);
+                       I915_WRITE(CUR_WM(pipe, level),
+                                  new->cursor[pipe][level]);
+               }
+               for (i = 0; i < intel_num_planes(crtc); i++)
+                       I915_WRITE(PLANE_WM_TRANS(pipe, i),
+                                  new->plane_trans[pipe][i]);
+               I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
+
+               for (i = 0; i < intel_num_planes(crtc); i++)
+                       skl_ddb_entry_write(dev_priv,
+                                           PLANE_BUF_CFG(pipe, i),
+                                           &new->ddb.plane[pipe][i]);
+
+               skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+                                   &new->ddb.cursor[pipe]);
+       }
 }
 
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing a rmw cycle (which might result in us clearing
- * all limits and the gpu stuck at whatever frequency it is at atm).
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ *  1. re-allocate the pipe(s) with the allocation being reduced and not
+ *     overlapping with a previous light-up pipe (another way to put it is:
+ *     pipes with their new allocation strictly included into their old ones).
+ *  2. re-allocate the other pipes that get their allocation reduced
+ *  3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ *     |   B    |   C    |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ *   allocation
+ *     |  A  |  B  |  C  |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
  */
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
 {
-       u32 limits;
+       struct drm_device *dev = dev_priv->dev;
+       int plane;
 
-       /* Only set the down limit when we've reached the lowest level to avoid
-        * getting more interrupts, otherwise leave this clear. This prevents a
-        * race in the hw when coming out of rc6: There's a tiny window where
-        * the hw runs at the minimal clock before selecting the desired
-        * frequency, if the down threshold expires in that window we will not
-        * receive a down interrupt. */
-       limits = dev_priv->rps.max_freq_softlimit << 24;
-       if (val <= dev_priv->rps.min_freq_softlimit)
-               limits |= dev_priv->rps.min_freq_softlimit << 16;
+       DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
 
-       return limits;
+       for_each_plane(pipe, plane) {
+               I915_WRITE(PLANE_SURF(pipe, plane),
+                          I915_READ(PLANE_SURF(pipe, plane)));
+       }
+       I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
 }
 
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+                           const struct skl_ddb_allocation *new,
+                           enum pipe pipe)
 {
-       int new_power;
+       uint16_t old_size, new_size;
 
-       new_power = dev_priv->rps.power;
-       switch (dev_priv->rps.power) {
-       case LOW_POWER:
-               if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
-                       new_power = BETWEEN;
-               break;
+       old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+       new_size = skl_ddb_entry_size(&new->pipe[pipe]);
 
-       case BETWEEN:
-               if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
-                       new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
-                       new_power = HIGH_POWER;
-               break;
+       return old_size != new_size &&
+              new->pipe[pipe].start >= old->pipe[pipe].start &&
+              new->pipe[pipe].end <= old->pipe[pipe].end;
+}
 
-       case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
-                       new_power = BETWEEN;
-               break;
-       }
-       /* Max/min bins are special */
-       if (val == dev_priv->rps.min_freq_softlimit)
-               new_power = LOW_POWER;
-       if (val == dev_priv->rps.max_freq_softlimit)
-               new_power = HIGH_POWER;
-       if (new_power == dev_priv->rps.power)
-               return;
-
-       /* Note the units here are not exactly 1us, but 1280ns. */
-       switch (new_power) {
-       case LOW_POWER:
-               /* Upclock if more than 95% busy over 16ms */
-               I915_WRITE(GEN6_RP_UP_EI, 12500);
-               I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+                               struct skl_wm_values *new_values)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct skl_ddb_allocation *cur_ddb, *new_ddb;
+       bool reallocated[I915_MAX_PIPES] = {false, false, false};
+       struct intel_crtc *crtc;
+       enum pipe pipe;
 
-               /* Downclock if less than 85% busy over 32ms */
-               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
-               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+       new_ddb = &new_values->ddb;
+       cur_ddb = &dev_priv->wm.skl_hw.ddb;
 
-               I915_WRITE(GEN6_RP_CONTROL,
-                          GEN6_RP_MEDIA_TURBO |
-                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                          GEN6_RP_MEDIA_IS_GFX |
-                          GEN6_RP_ENABLE |
-                          GEN6_RP_UP_BUSY_AVG |
-                          GEN6_RP_DOWN_IDLE_AVG);
-               break;
+       /*
+        * First pass: flush the pipes with the new allocation contained into
+        * the old space.
+        *
+        * We'll wait for the vblank on those pipes to ensure we can safely
+        * re-allocate the freed space without this pipe fetching from it.
+        */
+       for_each_intel_crtc(dev, crtc) {
+               if (!crtc->active)
+                       continue;
 
-       case BETWEEN:
-               /* Upclock if more than 90% busy over 13ms */
-               I915_WRITE(GEN6_RP_UP_EI, 10250);
-               I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+               pipe = crtc->pipe;
 
-               /* Downclock if less than 75% busy over 32ms */
-               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
-               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+               if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+                       continue;
 
-               I915_WRITE(GEN6_RP_CONTROL,
-                          GEN6_RP_MEDIA_TURBO |
-                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                          GEN6_RP_MEDIA_IS_GFX |
-                          GEN6_RP_ENABLE |
-                          GEN6_RP_UP_BUSY_AVG |
-                          GEN6_RP_DOWN_IDLE_AVG);
-               break;
+               skl_wm_flush_pipe(dev_priv, pipe, 1);
+               intel_wait_for_vblank(dev, pipe);
 
-       case HIGH_POWER:
-               /* Upclock if more than 85% busy over 10ms */
-               I915_WRITE(GEN6_RP_UP_EI, 8000);
-               I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+               reallocated[pipe] = true;
+       }
 
-               /* Downclock if less than 60% busy over 32ms */
-               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
-               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
 
-               I915_WRITE(GEN6_RP_CONTROL,
-                          GEN6_RP_MEDIA_TURBO |
-                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                          GEN6_RP_MEDIA_IS_GFX |
-                          GEN6_RP_ENABLE |
-                          GEN6_RP_UP_BUSY_AVG |
-                          GEN6_RP_DOWN_IDLE_AVG);
-               break;
-       }
+       /*
+        * Second pass: flush the pipes that are having their allocation
+        * reduced, but overlapping with a previous allocation.
+        *
+        * Here as well we need to wait for the vblank to make sure the freed
+        * space is not used anymore.
+        */
+       for_each_intel_crtc(dev, crtc) {
+               if (!crtc->active)
+                       continue;
 
-       dev_priv->rps.power = new_power;
-       dev_priv->rps.last_adj = 0;
-}
+               pipe = crtc->pipe;
 
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-{
-       u32 mask = 0;
+               if (reallocated[pipe])
+                       continue;
 
-       if (val > dev_priv->rps.min_freq_softlimit)
-               mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-       if (val < dev_priv->rps.max_freq_softlimit)
-               mask |= GEN6_PM_RP_UP_THRESHOLD;
+               if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+                   skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+                       skl_wm_flush_pipe(dev_priv, pipe, 2);
+                       intel_wait_for_vblank(dev, pipe);
+               }
 
-       mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
-       mask &= dev_priv->pm_rps_events;
+               reallocated[pipe] = true;
+       }
 
-       /* IVB and SNB hard hangs on looping batchbuffer
-        * if GEN6_PM_UP_EI_EXPIRED is masked.
+       /*
+        * Third pass: flush the pipes that got more space allocated.
+        *
+        * We don't need to actively wait for the update here, next vblank
+        * will just get more DDB space with the correct WM values.
         */
-       if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
-               mask |= GEN6_PM_RP_UP_EI_EXPIRED;
+       for_each_intel_crtc(dev, crtc) {
+               if (!crtc->active)
+                       continue;
 
-       if (IS_GEN8(dev_priv->dev))
-               mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+               pipe = crtc->pipe;
 
-       return ~mask;
+               /*
+                * At this point, only the pipes that were given more space
+                * than before are left to re-allocate.
+                */
+               if (reallocated[pipe])
+                       continue;
+
+               skl_wm_flush_pipe(dev_priv, pipe, 3);
+       }
 }
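+
+/*
+ * Worked example for the three passes above (hypothetical scenario, not
+ * part of the patch): suppose a mode change shrinks pipe A's DDB
+ * allocation and grows pipe B's into the space pipe A gives up.  Pass 1
+ * or 2 flushes pipe A and waits for its vblank, guaranteeing pipe A no
+ * longer fetches from the freed blocks; only then does pass 3 flush
+ * pipe B, which can start using the extra space on a later vblank
+ * without any explicit wait.
+ */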
 
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+                              struct skl_pipe_wm_parameters *params,
+                              struct intel_wm_config *config,
+                              struct skl_ddb_allocation *ddb, /* out */
+                              struct skl_pipe_wm *pipe_wm /* out */)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
-       /* min/max delay may still have been modified so be sure to
-        * write the limits value.
-        */
-       if (val != dev_priv->rps.cur_freq) {
-               gen6_set_rps_thresholds(dev_priv, val);
-
-               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-                       I915_WRITE(GEN6_RPNSWREQ,
-                                  HSW_FREQUENCY(val));
-               else
-                       I915_WRITE(GEN6_RPNSWREQ,
-                                  GEN6_FREQUENCY(val) |
-                                  GEN6_OFFSET(0) |
-                                  GEN6_AGGRESSIVE_TURBO);
-       }
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       /* Make sure we continue to get interrupts
-        * until we hit the minimum or maximum frequencies.
-        */
-       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
-       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+       skl_compute_wm_pipe_parameters(crtc, params);
+       skl_allocate_pipe_ddb(crtc, config, params, ddb);
+       skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
 
-       POSTING_READ(GEN6_RPNSWREQ);
+       if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+               return false;
 
-       dev_priv->rps.cur_freq = val;
-       trace_intel_gpu_freq_change(val * 50);
+       intel_crtc->wm.skl_active = *pipe_wm;
+       return true;
 }
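+
+/*
+ * Note on the return value: skl_update_pipe_wm() memcmp()s the freshly
+ * computed watermarks against intel_crtc->wm.skl_active and returns false
+ * when nothing changed, which lets callers (see skl_update_wm() below)
+ * bail out early and skip the register writes and flushes entirely.
+ */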
 
-/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
- *
- * * If Gfx is Idle, then
- * 1. Mask Turbo interrupts
- * 2. Bring up Gfx clock
- * 3. Change the freq to Rpn and wait till P-Unit updates freq
- * 4. Clear the Force GFX CLK ON bit so that Gfx can down
- * 5. Unmask Turbo interrupts
-*/
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+                                    struct drm_crtc *crtc,
+                                    struct intel_wm_config *config,
+                                    struct skl_wm_values *r)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       /* Latest VLV doesn't need to force the gfx clock */
-       if (dev->pdev->revision >= 0xd) {
-               valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-               return;
-       }
+       struct intel_crtc *intel_crtc;
+       struct intel_crtc *this_crtc = to_intel_crtc(crtc);
 
        /*
-        * When we are idle.  Drop to min voltage state.
+        * If the WM update hasn't changed the allocation for this_crtc (the
+        * crtc we are currently computing the new WM values for), other
+        * enabled crtcs will keep the same allocation and we don't need to
+        * recompute anything for them.
         */
-
-       if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+       if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
                return;
 
-       /* Mask turbo interrupt so that they will not come in between */
-       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-
-       vlv_force_gfx_clock(dev_priv, true);
-
-       dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
-
-       vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-                                       dev_priv->rps.min_freq_softlimit);
-
-       if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
-                               & GENFREQSTATUS) == 0, 5))
-               DRM_ERROR("timed out waiting for Punit\n");
-
-       vlv_force_gfx_clock(dev_priv, false);
+       /*
+        * Otherwise, because of this_crtc being freshly enabled/disabled, the
+        * other active pipes need new DDB allocation and WM values.
+        */
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+                               base.head) {
+               struct skl_pipe_wm_parameters params = {};
+               struct skl_pipe_wm pipe_wm = {};
+               bool wm_changed;
 
-       I915_WRITE(GEN6_PMINTRMSK,
-                  gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
-}
+               if (this_crtc->pipe == intel_crtc->pipe)
+                       continue;
 
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
+               if (!intel_crtc->active)
+                       continue;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       if (dev_priv->rps.enabled) {
-               if (IS_CHERRYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-               else if (IS_VALLEYVIEW(dev))
-                       vlv_set_rps_idle(dev_priv);
-               else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-               dev_priv->rps.last_adj = 0;
-       }
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
+               wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+                                               &params, config,
+                                               &r->ddb, &pipe_wm);
 
-void gen6_rps_boost(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
+               /*
+                * If we end up re-computing the other pipes' WM values, it's
+                * because it was really needed, so we expect the WM values to
+                * be different.
+                */
+               WARN_ON(!wm_changed);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       if (dev_priv->rps.enabled) {
-               if (IS_VALLEYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-               else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-               dev_priv->rps.last_adj = 0;
+               skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+               r->dirty[intel_crtc->pipe] = true;
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void skl_update_wm(struct drm_crtc *crtc)
 {
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct skl_pipe_wm_parameters params = {};
+       struct skl_wm_values *results = &dev_priv->wm.skl_results;
+       struct skl_pipe_wm pipe_wm = {};
+       struct intel_wm_config config = {};
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+       memset(results, 0, sizeof(*results));
 
-       if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
-                     "Odd GPU freq value\n"))
-               val &= ~1;
+       skl_compute_wm_global_parameters(dev, &config);
 
-       if (val != dev_priv->rps.cur_freq) {
-               DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-                                vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-                                dev_priv->rps.cur_freq,
-                                vlv_gpu_freq(dev_priv, val), val);
+       if (!skl_update_pipe_wm(crtc, &params, &config,
+                               &results->ddb, &pipe_wm))
+               return;
 
-               vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-       }
+       skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+       results->dirty[intel_crtc->pipe] = true;
 
-       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+       skl_update_other_pipe_wm(dev, crtc, &config, results);
+       skl_write_wm_values(dev_priv, results);
+       skl_flush_wm_values(dev_priv, results);
 
-       dev_priv->rps.cur_freq = val;
-       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+       /* store the new configuration */
+       dev_priv->wm.skl_hw = *results;
 }
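+
+/*
+ * Ordering note for skl_update_wm() above: everything is computed first
+ * (this pipe, then any other pipes whose allocation changed), the new
+ * values are written with skl_write_wm_values(), and the pipes are
+ * flushed afterwards.  The flush pass compares the new DDB against the
+ * copy still held in dev_priv->wm.skl_hw, which is why that copy is only
+ * refreshed at the very end.
+ */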
 
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+                    uint32_t sprite_width, uint32_t sprite_height,
+                    int pixel_size, bool enabled, bool scaled)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-       I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-                                  ~dev_priv->pm_rps_events);
-       /* Complete PM interrupt masking here doesn't race with the rps work
-        * item again unmasking PM interrupts because that is using a different
-        * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-        * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-        * gen8_enable_rps will clean up. */
+       struct intel_plane *intel_plane = to_intel_plane(plane);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->irq_lock);
+       intel_plane->wm.enabled = enabled;
+       intel_plane->wm.scaled = scaled;
+       intel_plane->wm.horiz_pixels = sprite_width;
+       intel_plane->wm.vert_pixels = sprite_height;
+       intel_plane->wm.bytes_per_pixel = pixel_size;
 
-       I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+       skl_update_wm(crtc);
 }
 
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
+static void ilk_update_wm(struct drm_crtc *crtc)
 {
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct ilk_wm_maximums max;
+       struct ilk_pipe_wm_parameters params = {};
+       struct ilk_wm_values results = {};
+       enum intel_ddb_partitioning partitioning;
+       struct intel_pipe_wm pipe_wm = {};
+       struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+       struct intel_wm_config config = {};
 
-       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
-                               ~dev_priv->pm_rps_events);
-       /* Complete PM interrupt masking here doesn't race with the rps work
-        * item again unmasking PM interrupts because that is using a different
-        * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-        * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+       ilk_compute_wm_parameters(crtc, &params);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->irq_lock);
+       intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
-       I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-}
+       if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+               return;
 
-static void gen6_disable_rps(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       intel_crtc->wm.active = pipe_wm;
 
-       I915_WRITE(GEN6_RC_CONTROL, 0);
-       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+       ilk_compute_wm_config(dev, &config);
 
-       if (IS_BROADWELL(dev))
-               gen8_disable_rps_interrupts(dev);
-       else
-               gen6_disable_rps_interrupts(dev);
-}
+       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
-static void cherryview_disable_rps(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /* 5/6 split only in single pipe config on IVB+ */
+       if (INTEL_INFO(dev)->gen >= 7 &&
+           config.num_pipes_active == 1 && config.sprites_enabled) {
+               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+               ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
-       I915_WRITE(GEN6_RC_CONTROL, 0);
+               best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+       } else {
+               best_lp_wm = &lp_wm_1_2;
+       }
+
+       partitioning = (best_lp_wm == &lp_wm_1_2) ?
+                      INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+       ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
 
-       gen8_disable_rps_interrupts(dev);
+       ilk_write_wm_values(dev_priv, &results);
 }
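+
+/*
+ * Example of the partitioning choice above: with several active pipes
+ * (or no sprites) the driver always programs the 1/2 DDB split.  With a
+ * single active pipe using sprites on IVB or later, it also merges the
+ * LP watermarks for the 5/6 split and lets ilk_find_best_result() pick
+ * whichever split gives the better result before programming it.
+ */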
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+                    struct drm_crtc *crtc,
+                    uint32_t sprite_width, uint32_t sprite_height,
+                    int pixel_size, bool enabled, bool scaled)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* we're doing forcewake before Disabling RC6,
-        * This what the BIOS expects when going into suspend */
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       struct drm_device *dev = plane->dev;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
 
-       I915_WRITE(GEN6_RC_CONTROL, 0);
+       intel_plane->wm.enabled = enabled;
+       intel_plane->wm.scaled = scaled;
+       intel_plane->wm.horiz_pixels = sprite_width;
+       intel_plane->wm.vert_pixels = sprite_height;
+       intel_plane->wm.bytes_per_pixel = pixel_size;
 
-       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+       /*
+        * IVB workaround: must disable low power watermarks for at least
+        * one frame before enabling scaling.  LP watermarks can be re-enabled
+        * when scaling is disabled.
+        *
+        * WaCxSRDisabledForSpriteScaling:ivb
+        */
+       if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+               intel_wait_for_vblank(dev, intel_plane->pipe);
 
-       gen6_disable_rps_interrupts(dev);
+       ilk_update_wm(crtc);
 }
 
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void skl_pipe_wm_active_state(uint32_t val,
+                                    struct skl_pipe_wm *active,
+                                    bool is_transwm,
+                                    bool is_cursor,
+                                    int i,
+                                    int level)
 {
-       if (IS_VALLEYVIEW(dev)) {
-               if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
-                       mode = GEN6_RC_CTL_RC6_ENABLE;
-               else
-                       mode = 0;
+       bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+       if (!is_transwm) {
+               if (!is_cursor) {
+                       active->wm[level].plane_en[i] = is_enabled;
+                       active->wm[level].plane_res_b[i] =
+                                       val & PLANE_WM_BLOCKS_MASK;
+                       active->wm[level].plane_res_l[i] =
+                                       (val >> PLANE_WM_LINES_SHIFT) &
+                                               PLANE_WM_LINES_MASK;
+               } else {
+                       active->wm[level].cursor_en = is_enabled;
+                       active->wm[level].cursor_res_b =
+                                       val & PLANE_WM_BLOCKS_MASK;
+                       active->wm[level].cursor_res_l =
+                                       (val >> PLANE_WM_LINES_SHIFT) &
+                                               PLANE_WM_LINES_MASK;
+               }
+       } else {
+               if (!is_cursor) {
+                       active->trans_wm.plane_en[i] = is_enabled;
+                       active->trans_wm.plane_res_b[i] =
+                                       val & PLANE_WM_BLOCKS_MASK;
+                       active->trans_wm.plane_res_l[i] =
+                                       (val >> PLANE_WM_LINES_SHIFT) &
+                                               PLANE_WM_LINES_MASK;
+               } else {
+                       active->trans_wm.cursor_en = is_enabled;
+                       active->trans_wm.cursor_res_b =
+                                       val & PLANE_WM_BLOCKS_MASK;
+                       active->trans_wm.cursor_res_l =
+                                       (val >> PLANE_WM_LINES_SHIFT) &
+                                               PLANE_WM_LINES_MASK;
+               }
        }
-       DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-                     (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                     (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                     (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 }
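+
+/*
+ * Decode sketch for the helper above (field values illustrative): each
+ * PLANE_WM-style register packs an enable bit (PLANE_WM_EN), a block
+ * count in the low bits (PLANE_WM_BLOCKS_MASK) and a line count above it
+ * (PLANE_WM_LINES_SHIFT / PLANE_WM_LINES_MASK).  A raw value with the
+ * enable bit set, 0x40 in the blocks field and 2 in the lines field thus
+ * unpacks to plane_en = true, plane_res_b = 64, plane_res_l = 2.
+ */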
 
-static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
-       /* No RC6 before Ironlake */
-       if (INTEL_INFO(dev)->gen < 5)
-               return 0;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+       enum pipe pipe = intel_crtc->pipe;
+       int level, i, max_level;
+       uint32_t temp;
 
-       /* RC6 is only on Ironlake mobile not on desktop */
-       if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
-               return 0;
+       max_level = ilk_wm_max_level(dev);
 
-       /* Respect the kernel parameter if it is set */
-       if (enable_rc6 >= 0) {
-               int mask;
+       hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
-               if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
-                       mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
-                              INTEL_RC6pp_ENABLE;
-               else
-                       mask = INTEL_RC6_ENABLE;
+       for (level = 0; level <= max_level; level++) {
+               for (i = 0; i < intel_num_planes(intel_crtc); i++)
+                       hw->plane[pipe][i][level] =
+                                       I915_READ(PLANE_WM(pipe, i, level));
+               hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+       }
 
-               if ((enable_rc6 & mask) != enable_rc6)
-                       DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-                                     enable_rc6 & mask, enable_rc6, mask);
+       for (i = 0; i < intel_num_planes(intel_crtc); i++)
+               hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+       hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
 
-               return enable_rc6 & mask;
-       }
+       if (!intel_crtc_active(crtc))
+               return;
 
-       /* Disable RC6 on Ironlake */
-       if (INTEL_INFO(dev)->gen == 5)
-               return 0;
+       hw->dirty[pipe] = true;
 
-       if (IS_IVYBRIDGE(dev))
-               return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+       active->linetime = hw->wm_linetime[pipe];
 
-       return INTEL_RC6_ENABLE;
-}
+       for (level = 0; level <= max_level; level++) {
+               for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+                       temp = hw->plane[pipe][i][level];
+                       skl_pipe_wm_active_state(temp, active, false,
+                                               false, i, level);
+               }
+               temp = hw->cursor[pipe][level];
+               skl_pipe_wm_active_state(temp, active, false, true, i, level);
+       }
 
-int intel_enable_rc6(const struct drm_device *dev)
-{
-       return i915.enable_rc6;
+       for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+               temp = hw->plane_trans[pipe][i];
+               skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+       }
+
+       temp = hw->cursor_trans[pipe];
+       skl_pipe_wm_active_state(temp, active, true, true, i, 0);
 }
 
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
+void skl_wm_get_hw_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+       struct drm_crtc *crtc;
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       WARN_ON(dev_priv->rps.pm_iir);
-       gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-       I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       skl_ddb_get_hw_state(dev_priv, ddb);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               skl_pipe_wm_get_hw_state(crtc);
 }
 
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct ilk_wm_values *hw = &dev_priv->wm.hw;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_pipe_wm *active = &intel_crtc->wm.active;
+       enum pipe pipe = intel_crtc->pipe;
+       static const unsigned int wm0_pipe_reg[] = {
+               [PIPE_A] = WM0_PIPEA_ILK,
+               [PIPE_B] = WM0_PIPEB_ILK,
+               [PIPE_C] = WM0_PIPEC_IVB,
+       };
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       WARN_ON(dev_priv->rps.pm_iir);
-       gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-       I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
+       hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
-       /* All of these values are in units of 50MHz */
-       dev_priv->rps.cur_freq          = 0;
-       /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-       dev_priv->rps.rp1_freq          = (rp_state_cap >>  8) & 0xff;
-       dev_priv->rps.rp0_freq          = (rp_state_cap >>  0) & 0xff;
-       dev_priv->rps.min_freq          = (rp_state_cap >> 16) & 0xff;
-       /* XXX: only BYT has a special efficient freq */
-       dev_priv->rps.efficient_freq    = dev_priv->rps.rp1_freq;
-       /* hw_max = RP0 until we check for overclocking */
-       dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
+       active->pipe_enabled = intel_crtc_active(crtc);
 
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       if (active->pipe_enabled) {
+               u32 tmp = hw->wm_pipe[pipe];
 
-       if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+               /*
+                * For active pipes the LP0 watermark is marked as
+                * enabled, and the LP1+ watermarks as disabled, since
+                * we can't really reverse-compute them in case
+                * multiple pipes are active.
+                */
+               active->wm[0].enable = true;
+               active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+               active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+               active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+               active->linetime = hw->wm_linetime[pipe];
+       } else {
+               int level, max_level = ilk_wm_max_level(dev);
+
+               /*
+                * For inactive pipes, all watermark levels
+                * should be marked as enabled but zeroed,
+                * which is what we would compute for them anyway.
+                */
+               for (level = 0; level <= max_level; level++)
+                       active->wm[level].enable = true;
+       }
 }
 
-static void gen8_enable_rps(struct drm_device *dev)
+void ilk_wm_get_hw_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       uint32_t rc6_mask = 0, rp_state_cap;
-       int unused;
-
-       /* 1a: Software RC state - RC0 */
-       I915_WRITE(GEN6_RC_STATE, 0);
-
-       /* 1c & 1d: Get forcewake during program sequence. Although the driver
-        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
-
-       /* 2a: Disable RC states. */
-       I915_WRITE(GEN6_RC_CONTROL, 0);
-
-       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-       parse_rp_state_cap(dev_priv, rp_state_cap);
-
-       /* 2b: Program RC6 thresholds.*/
-       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
-       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-       for_each_ring(ring, dev_priv, unused)
-               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
-       I915_WRITE(GEN6_RC_SLEEP, 0);
-       if (IS_BROADWELL(dev))
-               I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-       else
-               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
-       /* 3: Enable RC6 */
-       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-       intel_print_rc6_info(dev, rc6_mask);
-       if (IS_BROADWELL(dev))
-               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                               GEN7_RC_CTL_TO_MODE |
-                               rc6_mask);
-       else
-               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                               GEN6_RC_CTL_EI_MODE(1) |
-                               rc6_mask);
-
-       /* 4 Program defaults and thresholds for RPS*/
-       I915_WRITE(GEN6_RPNSWREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
-       I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
-       /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
-       /* Docs recommend 900MHz, and 300 MHz respectively */
-       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_freq_softlimit << 24 |
-                  dev_priv->rps.min_freq_softlimit << 16);
-
-       I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
-       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
-       I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
-       I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       struct ilk_wm_values *hw = &dev_priv->wm.hw;
+       struct drm_crtc *crtc;
 
-       /* 5: Enable RPS */
-       I915_WRITE(GEN6_RP_CONTROL,
-                  GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX |
-                  GEN6_RP_ENABLE |
-                  GEN6_RP_UP_BUSY_AVG |
-                  GEN6_RP_DOWN_IDLE_AVG);
+       for_each_crtc(dev, crtc)
+               ilk_pipe_wm_get_hw_state(crtc);
 
-       /* 6: Ring frequency + overclocking (our driver does this later */
+       hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+       hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+       hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
 
-       gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+       hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+       if (INTEL_INFO(dev)->gen >= 7) {
+               hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+               hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+       }
 
-       gen8_enable_rps_interrupts(dev);
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+       else if (IS_IVYBRIDGE(dev))
+               hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
-       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+       hw->enable_fbc_wm =
+               !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 }
 
-static void gen6_enable_rps(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       u32 rp_state_cap;
-       u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
-       u32 gtfifodbg;
-       int rc6_mode;
-       int i, ret;
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ *   - normal (i.e. non-self-refresh)
+ *   - self-refresh (SR) mode
+ *   - lines are large relative to FIFO size (buffer can hold up to 2)
+ *   - lines are small relative to FIFO size (buffer can hold more than 2
+ *     lines), so need to account for TLB latency
+ *
+ *   The normal calculation is:
+ *     watermark = dotclock * bytes per pixel * latency
+ *   where latency is platform & configuration dependent (we assume pessimal
+ *   values here).
+ *
+ *   The SR calculation is:
+ *     watermark = (trunc(latency/line time)+1) * surface width *
+ *       bytes per pixel
+ *   where
+ *     line time = htotal / dotclock
+ *     surface width = hdisplay for normal plane and 64 for cursor
+ *   and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that.  And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       if (dev_priv->display.update_wm)
+               dev_priv->display.update_wm(crtc);
+}
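+
+/*
+ * Worked example for the normal-mode formula above, with purely
+ * illustrative numbers: a 100 MHz dotclock at 4 bytes per pixel and an
+ * assumed 20 us latency gives
+ *
+ *   watermark = 100,000,000 px/s * 4 B/px * 0.000020 s = 8000 bytes
+ *
+ * that the FIFO must be able to absorb, before the rounding up and the
+ * two extra entries for clock crossings mentioned above.
+ */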
 
-       /* Here begins a magic sequence of register writes to enable
-        * auto-downclocking.
-        *
-        * Perhaps there might be some value in exposing these to
-        * userspace...
-        */
-       I915_WRITE(GEN6_RC_STATE, 0);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+                                   struct drm_crtc *crtc,
+                                   uint32_t sprite_width,
+                                   uint32_t sprite_height,
+                                   int pixel_size,
+                                   bool enabled, bool scaled)
+{
+       struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
-       /* Clear the DBG now so we don't confuse earlier errors */
-       if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-               DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
-               I915_WRITE(GTFIFODBG, gtfifodbg);
-       }
+       if (dev_priv->display.update_sprite_wm)
+               dev_priv->display.update_sprite_wm(plane, crtc,
+                                                  sprite_width, sprite_height,
+                                                  pixel_size, enabled, scaled);
+}
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+       struct drm_i915_gem_object *ctx;
+       int ret;
 
-       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       parse_rp_state_cap(dev_priv, rp_state_cap);
+       ctx = i915_gem_alloc_object(dev, 4096);
+       if (!ctx) {
+               DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+               return NULL;
+       }
 
-       /* disable the counters and set deterministic thresholds */
-       I915_WRITE(GEN6_RC_CONTROL, 0);
+       ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
+       if (ret) {
+               DRM_ERROR("failed to pin power context: %d\n", ret);
+               goto err_unref;
+       }
 
-       I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
-       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
-       I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
-       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
-       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+       ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+       if (ret) {
+               DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+               goto err_unpin;
+       }
 
-       for_each_ring(ring, dev_priv, i)
-               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       return ctx;
 
-       I915_WRITE(GEN6_RC_SLEEP, 0);
-       I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-       if (IS_IVYBRIDGE(dev))
-               I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
-       else
-               I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-       I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
-       I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+err_unpin:
+       i915_gem_object_ggtt_unpin(ctx);
+err_unref:
+       drm_gem_object_unreference(&ctx->base);
+       return NULL;
+}
 
-       /* Check if we are enabling RC6 */
-       rc6_mode = intel_enable_rc6(dev_priv->dev);
-       if (rc6_mode & INTEL_RC6_ENABLE)
-               rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+/**
+ * Lock protecting IPS-related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
 
-       /* We don't use those on Haswell */
-       if (!IS_HASWELL(dev)) {
-               if (rc6_mode & INTEL_RC6p_ENABLE)
-                       rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
 
-               if (rc6_mode & INTEL_RC6pp_ENABLE)
-                       rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u16 rgvswctl;
+
+       assert_spin_locked(&mchdev_lock);
+
+       rgvswctl = I915_READ16(MEMSWCTL);
+       if (rgvswctl & MEMCTL_CMD_STS) {
+               DRM_DEBUG("gpu busy, RCS change rejected\n");
+               return false; /* still busy with another command */
        }
 
-       intel_print_rc6_info(dev, rc6_mask);
+       rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+               (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+       I915_WRITE16(MEMSWCTL, rgvswctl);
+       POSTING_READ16(MEMSWCTL);
 
-       I915_WRITE(GEN6_RC_CONTROL,
-                  rc6_mask |
-                  GEN6_RC_CTL_EI_MODE(1) |
-                  GEN6_RC_CTL_HW_ENABLE);
+       rgvswctl |= MEMCTL_CMD_STS;
+       I915_WRITE16(MEMSWCTL, rgvswctl);
 
-       /* Power down if completely idle for over 50ms */
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       return true;
+}
 
-       ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
-       if (ret)
-               DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+static void ironlake_enable_drps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rgvmodectl = I915_READ(MEMMODECTL);
+       u8 fmax, fmin, fstart, vstart;
 
-       ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
-       if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
-               DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-                                (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
-                                (pcu_mbox & 0xff) * 50);
-               dev_priv->rps.max_freq = pcu_mbox & 0xff;
-       }
+       spin_lock_irq(&mchdev_lock);
 
-       dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+       /* Enable temp reporting */
+       I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+       I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
 
-       gen6_enable_rps_interrupts(dev);
+       /* 100ms RC evaluation intervals */
+       I915_WRITE(RCUPEI, 100000);
+       I915_WRITE(RCDNEI, 100000);
 
-       rc6vids = 0;
-       ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-       if (IS_GEN6(dev) && ret) {
-               DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-       } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
-               DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
-                         GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
-               rc6vids &= 0xffff00;
-               rc6vids |= GEN6_ENCODE_RC6_VID(450);
-               ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
-               if (ret)
-                       DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
-       }
+       /* Set max/min thresholds to 90ms and 80ms respectively */
+       I915_WRITE(RCBMAXAVG, 90000);
+       I915_WRITE(RCBMINAVG, 80000);
 
-       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-}
+       I915_WRITE(MEMIHYST, 1);
 
-static void __gen6_update_ring_freq(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int min_freq = 15;
-       unsigned int gpu_freq;
-       unsigned int max_ia_freq, min_ring_freq;
-       int scaling_factor = 180;
-       struct cpufreq_policy *policy;
+       /* Set up min, max, and cur for interrupt handling */
+       fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+       fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+       fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+               MEMMODE_FSTART_SHIFT;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+               PXVFREQ_PX_SHIFT;
 
-       policy = cpufreq_cpu_get(0);
-       if (policy) {
-               max_ia_freq = policy->cpuinfo.max_freq;
-               cpufreq_cpu_put(policy);
-       } else {
-               /*
-                * Default to measured freq if none found, PCU will ensure we
-                * don't go over
-                */
-               max_ia_freq = tsc_khz;
-       }
+       dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+       dev_priv->ips.fstart = fstart;
 
-       /* Convert from kHz to MHz */
-       max_ia_freq /= 1000;
+       dev_priv->ips.max_delay = fstart;
+       dev_priv->ips.min_delay = fmin;
+       dev_priv->ips.cur_delay = fstart;
 
-       min_ring_freq = I915_READ(DCLK) & 0xf;
-       /* convert DDR frequency from units of 266.6MHz to bandwidth */
-       min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+                        fmax, fmin, fstart);
+
+       I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
        /*
-        * For each potential GPU frequency, load a ring frequency we'd like
-        * to use for memory access.  We do this by specifying the IA frequency
-        * the PCU should use as a reference to determine the ring frequency.
+        * Interrupts will be enabled in ironlake_irq_postinstall
         */
-       for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
-            gpu_freq--) {
-               int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
-               unsigned int ia_freq = 0, ring_freq = 0;
 
-               if (INTEL_INFO(dev)->gen >= 8) {
-                       /* max(2 * GT, DDR). NB: GT is 50MHz units */
-                       ring_freq = max(min_ring_freq, gpu_freq);
-               } else if (IS_HASWELL(dev)) {
-                       ring_freq = mult_frac(gpu_freq, 5, 4);
-                       ring_freq = max(min_ring_freq, ring_freq);
-                       /* leave ia_freq as the default, chosen by cpufreq */
-               } else {
-                       /* On older processors, there is no separate ring
-                        * clock domain, so in order to boost the bandwidth
-                        * of the ring, we need to upclock the CPU (ia_freq).
-                        *
-                        * For GPU frequencies less than 750MHz,
-                        * just use the lowest ring freq.
-                        */
-                       if (gpu_freq < min_freq)
-                               ia_freq = 800;
-                       else
-                               ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
-                       ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-               }
+       I915_WRITE(VIDSTART, vstart);
+       POSTING_READ(VIDSTART);
 
-               sandybridge_pcode_write(dev_priv,
-                                       GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
-                                       ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
-                                       ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
-                                       gpu_freq);
-       }
-}
+       rgvmodectl |= MEMMODE_SWMODE_EN;
+       I915_WRITE(MEMMODECTL, rgvmodectl);
 
-void gen6_update_ring_freq(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+               DRM_ERROR("stuck trying to change perf mode\n");
+       mdelay(1);
 
-       if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
-               return;
+       ironlake_set_drps(dev, fstart);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       __gen6_update_ring_freq(dev);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+               I915_READ(0x112e0);
+       dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+       dev_priv->ips.last_count2 = I915_READ(0x112f4);
+       dev_priv->ips.last_time2 = ktime_get_raw_ns();
+
+       spin_unlock_irq(&mchdev_lock);
 }
 
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+static void ironlake_disable_drps(struct drm_device *dev)
 {
-       u32 val, rp0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u16 rgvswctl;
 
-       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-       rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+       spin_lock_irq(&mchdev_lock);
 
-       return rp0;
-}
+       rgvswctl = I915_READ16(MEMSWCTL);
 
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
-       u32 val, rpe;
+       /* Ack interrupts, disable EFC interrupt */
+       I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+       I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+       I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+       I915_WRITE(DEIIR, DE_PCU_EVENT);
+       I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
-       val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
-       rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+       /* Go back to the starting frequency */
+       ironlake_set_drps(dev, dev_priv->ips.fstart);
+       mdelay(1);
+       rgvswctl |= MEMCTL_CMD_STS;
+       I915_WRITE(MEMSWCTL, rgvswctl);
+       mdelay(1);
 
-       return rpe;
+       spin_unlock_irq(&mchdev_lock);
 }
 
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us clearing
+ * all limits and the gpu getting stuck at whatever frequency it is at right
+ * now).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
-       u32 val, rp1;
+       u32 limits;
 
-       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-       rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+       /* Only set the down limit when we've reached the lowest level to avoid
+        * getting more interrupts, otherwise leave this clear. This prevents a
+        * race in the hw when coming out of rc6: there's a tiny window where
+        * the hw runs at the minimal clock before selecting the desired
+        * frequency; if the down threshold expires in that window we will not
+        * receive a down interrupt. */
+       limits = dev_priv->rps.max_freq_softlimit << 24;
+       if (val <= dev_priv->rps.min_freq_softlimit)
+               limits |= dev_priv->rps.min_freq_softlimit << 16;
 
-       return rp1;
+       return limits;
 }
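+
+/*
+ * Packing example (hypothetical frequency codes, in the usual 50 MHz
+ * units): with max_freq_softlimit = 0x16 and min_freq_softlimit = 0x0b,
+ * a request above the minimum yields limits = 0x16000000, leaving the
+ * down limit clear, while a request at or below the minimum yields
+ * 0x16000000 | 0x000b0000 = 0x160b0000, setting the down limit only once
+ * the bottom is reached, exactly as the comment above explains.
+ */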
 
-static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
-       u32 val, rpn;
+       int new_power;
 
-       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-       rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
-       return rpn;
-}
+       new_power = dev_priv->rps.power;
+       switch (dev_priv->rps.power) {
+       case LOW_POWER:
+               if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+                       new_power = BETWEEN;
+               break;
 
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
-       u32 val, rp1;
+       case BETWEEN:
+               if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+                       new_power = LOW_POWER;
+               else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+                       new_power = HIGH_POWER;
+               break;
 
-       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+       case HIGH_POWER:
+               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+                       new_power = BETWEEN;
+               break;
+       }
+       /* Max/min bins are special */
+       if (val == dev_priv->rps.min_freq_softlimit)
+               new_power = LOW_POWER;
+       if (val == dev_priv->rps.max_freq_softlimit)
+               new_power = HIGH_POWER;
+       if (new_power == dev_priv->rps.power)
+               return;
 
-       rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+       /* Note the units here are not exactly 1us, but 1280ns. */
+       switch (new_power) {
+       case LOW_POWER:
+               /* Upclock if more than 95% busy over 16ms */
+               I915_WRITE(GEN6_RP_UP_EI, 12500);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
 
-       return rp1;
-}
+               /* Downclock if less than 85% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
 
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
-       u32 val, rp0;
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
 
-       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+       case BETWEEN:
+               /* Upclock if more than 90% busy over 13ms */
+               I915_WRITE(GEN6_RP_UP_EI, 10250);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
 
-       rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
-       /* Clamp to max */
-       rp0 = min_t(u32, rp0, 0xea);
+               /* Downclock if less than 75% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
 
-       return rp0;
-}
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
 
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
-       u32 val, rpe;
+       case HIGH_POWER:
+               /* Upclock if more than 85% busy over 10ms */
+               I915_WRITE(GEN6_RP_UP_EI, 8000);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
 
-       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
-       rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
-       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
-       rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+               /* Downclock if less than 60% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
 
-       return rpe;
-}
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
+       }
 
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
-       return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+       dev_priv->rps.power = new_power;
+       dev_priv->rps.last_adj = 0;
 }
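+
+/*
+ * Unit check for the thresholds above (the EI registers count 1280 ns
+ * units, not microseconds): in the BETWEEN case, GEN6_RP_UP_EI = 10250
+ * units is 10250 * 1.28 us = 13.12 ms, and the up threshold of 9225 is
+ * 9225 / 10250 = 90% of that interval, matching the "more than 90% busy
+ * over 13ms" comment.
+ */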
 
-/* Check that the pctx buffer wasn't move under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
-       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+       u32 mask = 0;
 
-       WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
-                            dev_priv->vlv_pctx->stolen->start);
-}
+       if (val > dev_priv->rps.min_freq_softlimit)
+               mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+       if (val < dev_priv->rps.max_freq_softlimit)
+               mask |= GEN6_PM_RP_UP_THRESHOLD;
 
+       mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+       mask &= dev_priv->pm_rps_events;
 
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
-       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+       /* IVB and SNB hard-hang on a looping batchbuffer
+        * if GEN6_PM_UP_EI_EXPIRED is masked.
+        */
+       if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
+               mask |= GEN6_PM_RP_UP_EI_EXPIRED;
 
-       WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+       if (IS_GEN8(dev_priv->dev))
+               mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+       return ~mask;
 }
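+
+/*
+ * Behaviour note: gen6_rps_pm_mask() returns an inverted mask (note the
+ * ~mask), because bits set in GEN6_PMINTRMSK disable the corresponding
+ * PM interrupts.  At the softlimit minimum the down-threshold events
+ * therefore stay masked, and at the maximum the up-threshold event stays
+ * masked, so the GPU stops raising interrupts we could not act on anyway.
+ */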
 
-static void cherryview_setup_pctx(struct drm_device *dev)
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_delay and max_delay) is modified so that we can
+ * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
+void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long pctx_paddr, paddr;
-       struct i915_gtt *gtt = &dev_priv->gtt;
-       u32 pcbr;
-       int pctx_size = 32*1024;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-       pcbr = I915_READ(VLV_PCBR);
-       if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
-               paddr = (dev_priv->mm.stolen_base +
-                        (gtt->stolen_size - pctx_size));
+       /* min/max delay may still have been modified, so be sure to
+        * write the limits value.
+        */
+       if (val != dev_priv->rps.cur_freq) {
+               gen6_set_rps_thresholds(dev_priv, val);
 
-               pctx_paddr = (paddr & (~4095));
-               I915_WRITE(VLV_PCBR, pctx_paddr);
-       }
-}
-
-static void valleyview_setup_pctx(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *pctx;
-       unsigned long pctx_paddr;
-       u32 pcbr;
-       int pctx_size = 24*1024;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       pcbr = I915_READ(VLV_PCBR);
-       if (pcbr) {
-               /* BIOS set it up already, grab the pre-alloc'd space */
-               int pcbr_offset;
-
-               pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-               pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
-                                                                     pcbr_offset,
-                                                                     I915_GTT_OFFSET_NONE,
-                                                                     pctx_size);
-               goto out;
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       I915_WRITE(GEN6_RPNSWREQ,
+                                  HSW_FREQUENCY(val));
+               else
+                       I915_WRITE(GEN6_RPNSWREQ,
+                                  GEN6_FREQUENCY(val) |
+                                  GEN6_OFFSET(0) |
+                                  GEN6_AGGRESSIVE_TURBO);
        }
 
-       /*
-        * From the Gunit register HAS:
-        * The Gfx driver is expected to program this register and ensure
-        * proper allocation within Gfx stolen memory.  For example, this
-        * register should be programmed such than the PCBR range does not
-        * overlap with other ranges, such as the frame buffer, protected
-        * memory, or any other relevant ranges.
+       /* Make sure we continue to get interrupts
+        * until we hit the minimum or maximum frequencies.
         */
-       pctx = i915_gem_object_create_stolen(dev, pctx_size);
-       if (!pctx) {
-               DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
-               return;
-       }
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
-       I915_WRITE(VLV_PCBR, pctx_paddr);
+       POSTING_READ(GEN6_RPNSWREQ);
 
-out:
-       dev_priv->vlv_pctx = pctx;
+       dev_priv->rps.cur_freq = val;
+       trace_intel_gpu_freq_change(val * 50);
 }
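
The trailing trace call multiplies by 50 because RPS frequency codes on these
parts are in 50 MHz units (gen6_init_rps_frequencies below notes the same). A
trivial sketch of that conversion, assuming nothing beyond the 50 MHz
granularity:

#include <stdio.h>

/* RPS codes are in 50 MHz units; e.g. code 18 -> 900 MHz. */
static int rps_code_to_mhz(int code)
{
        return code * 50;
}

int main(void)
{
        for (int code = 6; code <= 22; code += 8)
                printf("code %2d -> %4d MHz\n", code, rps_code_to_mhz(code));
        return 0;
}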
 
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * If Gfx is idle, then:
+ * 1. Mask Turbo interrupts
+ * 2. Bring up Gfx clock
+ * 3. Change the freq to Rpn and wait till P-Unit updates freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can go down
+ * 5. Unmask Turbo interrupts
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
 
-       if (WARN_ON(!dev_priv->vlv_pctx))
+       /* Latest VLV doesn't need to force the gfx clock */
+       if (dev->pdev->revision >= 0xd) {
+               valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
                return;
+       }
 
-       drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
-       dev_priv->vlv_pctx = NULL;
-}
-
-static void valleyview_init_gt_powersave(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val;
+       /*
+        * When we are idle, drop to the min voltage state.
+        */
 
-       valleyview_setup_pctx(dev);
+       if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+               return;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       /* Mask turbo interrupts so that they do not come in between */
+       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 
-       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-       switch ((val >> 6) & 3) {
-       case 0:
-       case 1:
-               dev_priv->mem_freq = 800;
-               break;
-       case 2:
-               dev_priv->mem_freq = 1066;
-               break;
-       case 3:
-               dev_priv->mem_freq = 1333;
-               break;
-       }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+       vlv_force_gfx_clock(dev_priv, true);
 
-       dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
-       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+       dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
-       dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
-       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+       vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+                                       dev_priv->rps.min_freq_softlimit);
 
-       dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
-       DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+       if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+                               & GENFREQSTATUS) == 0, 100))
+               DRM_ERROR("timed out waiting for Punit\n");
 
-       dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
-       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+       vlv_force_gfx_clock(dev_priv, false);
 
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       I915_WRITE(GEN6_PMINTRMSK,
+                  gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+}
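
wait_for() above is i915's poll-until-true-or-timeout helper. A userspace
approximation of the same pattern, with clock_gettime() standing in for the
kernel's jiffies bookkeeping; the helper name and predicate are invented:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
}

/* Poll cond() until it returns true or timeout_ms elapses.
 * Returns 0 on success, -1 on timeout, same shape as i915's wait_for(). */
static int wait_for_cond(bool (*cond)(void), int timeout_ms)
{
        long long deadline = now_ms() + timeout_ms;

        while (!cond()) {
                if (now_ms() > deadline)
                        return cond() ? 0 : -1; /* one final check */
        }
        return 0;
}

static bool punit_ready(void) { return true; } /* stand-in predicate */

int main(void)
{
        if (wait_for_cond(punit_ready, 100))
                fprintf(stderr, "timed out waiting for Punit\n");
        return 0;
}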
 
-       if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
 
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (dev_priv->rps.enabled) {
+               if (IS_CHERRYVIEW(dev))
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               else if (IS_VALLEYVIEW(dev))
+                       vlv_set_rps_idle(dev_priv);
+               else
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               dev_priv->rps.last_adj = 0;
+       }
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val;
-
-       cherryview_setup_pctx(dev);
+       struct drm_device *dev = dev_priv->dev;
 
        mutex_lock(&dev_priv->rps.hw_lock);
-
-       val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
-       switch ((val >> 2) & 0x7) {
-       case 0:
-       case 1:
-               dev_priv->rps.cz_freq = 200;
-               dev_priv->mem_freq = 1600;
-               break;
-       case 2:
-               dev_priv->rps.cz_freq = 267;
-               dev_priv->mem_freq = 1600;
-               break;
-       case 3:
-               dev_priv->rps.cz_freq = 333;
-               dev_priv->mem_freq = 2000;
-               break;
-       case 4:
-               dev_priv->rps.cz_freq = 320;
-               dev_priv->mem_freq = 1600;
-               break;
-       case 5:
-               dev_priv->rps.cz_freq = 400;
-               dev_priv->mem_freq = 1600;
-               break;
+       if (dev_priv->rps.enabled) {
+               if (IS_VALLEYVIEW(dev))
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+               else
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+               dev_priv->rps.last_adj = 0;
        }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
 
-       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
-       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
-       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-       dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
-       DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+       if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+                     "Odd GPU freq value\n"))
+               val &= ~1;
 
-       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
-       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+       if (val != dev_priv->rps.cur_freq)
+               vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-       WARN_ONCE((dev_priv->rps.max_freq |
-                  dev_priv->rps.efficient_freq |
-                  dev_priv->rps.rp1_freq |
-                  dev_priv->rps.min_freq) & 1,
-                 "Odd GPU freq values\n");
+       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       dev_priv->rps.cur_freq = val;
+       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+}
 
-       if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+static void gen9_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
 {
-       valleyview_cleanup_pctx(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 }
 
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-       int i;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+}
 
-       gtfifodbg = I915_READ(GTFIFODBG);
-       if (gtfifodbg) {
-               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
-                                gtfifodbg);
-               I915_WRITE(GTFIFODBG, gtfifodbg);
-       }
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       cherryview_check_pctx(dev_priv);
+       /* We're doing forcewake before disabling RC6;
+        * this is what the BIOS expects when going into suspend */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-       /* 1a & 1b: Get forcewake during program sequence. Although the driver
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+       if (IS_VALLEYVIEW(dev)) {
+               if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+                       mode = GEN6_RC_CTL_RC6_ENABLE;
+               else
+                       mode = 0;
+       }
+       if (HAS_RC6p(dev))
+               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+                             (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+                             (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+                             (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+       else
+               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+                             (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+}
+
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+{
+       /* No RC6 before Ironlake */
+       if (INTEL_INFO(dev)->gen < 5)
+               return 0;
+
+       /* RC6 is only on Ironlake mobile, not on desktop */
+       if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+               return 0;
+
+       /* Respect the kernel parameter if it is set */
+       if (enable_rc6 >= 0) {
+               int mask;
+
+               if (HAS_RC6p(dev))
+                       mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+                              INTEL_RC6pp_ENABLE;
+               else
+                       mask = INTEL_RC6_ENABLE;
+
+               if ((enable_rc6 & mask) != enable_rc6)
+                       DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+                                     enable_rc6 & mask, enable_rc6, mask);
+
+               return enable_rc6 & mask;
+       }
+
+       /* Disable RC6 on Ironlake */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
+
+       if (IS_IVYBRIDGE(dev))
+               return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+
+       return INTEL_RC6_ENABLE;
+}
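
sanitize_rc6_option amounts to: honor the user's bitmask, clamp it to what the
hardware supports, and report any dropped bits. A sketch of just the clamping
step; the three flag values are assumptions chosen to mirror
INTEL_RC6{,p,pp}_ENABLE:

#include <stdio.h>

#define RC6_ENABLE   (1 << 0)
#define RC6p_ENABLE  (1 << 1)
#define RC6pp_ENABLE (1 << 2)

/* Clamp a requested mode to the supported mask, reporting adjustments. */
static int sanitize(int requested, int valid_mask)
{
        if ((requested & valid_mask) != requested)
                printf("adjusting %d -> %d (valid %d)\n",
                       requested, requested & valid_mask, valid_mask);
        return requested & valid_mask;
}

int main(void)
{
        /* Haswell-like part: only plain RC6 is valid. */
        int mode = sanitize(RC6_ENABLE | RC6p_ENABLE, RC6_ENABLE);

        printf("final mode: %d\n", mode); /* 1 */
        return 0;
}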
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+       return i915.enable_rc6;
+}
+
+static void gen6_init_rps_frequencies(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t rp_state_cap;
+       u32 ddcc_status = 0;
+       int ret;
+
+       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       /* All of these values are in units of 50MHz */
+       dev_priv->rps.cur_freq          = 0;
+       /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+       dev_priv->rps.rp0_freq          = (rp_state_cap >>  0) & 0xff;
+       dev_priv->rps.rp1_freq          = (rp_state_cap >>  8) & 0xff;
+       dev_priv->rps.min_freq          = (rp_state_cap >> 16) & 0xff;
+       /* hw_max = RP0 until we check for overclocking */
+       dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
+
+       dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               ret = sandybridge_pcode_read(dev_priv,
+                                       HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+                                       &ddcc_status);
+               if (ret == 0)
+                       dev_priv->rps.efficient_freq =
+                               (ddcc_status >> 8) & 0xff;
+       }
+
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+       if (dev_priv->rps.min_freq_softlimit == 0) {
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       dev_priv->rps.min_freq_softlimit =
+                               /* max(RPe, 450 MHz) */
+                               max(dev_priv->rps.efficient_freq, (u8) 9);
+               else
+                       dev_priv->rps.min_freq_softlimit =
+                               dev_priv->rps.min_freq;
+       }
+}
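
The three fields pulled out of GEN6_RP_STATE_CAP are byte-wide frequency codes
stacked in one register, all in 50 MHz units. Decoding a made-up raw value
shows the layout (the sample constant is invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct rps_caps { int rp0, rp1, rpn; };

/* RP0 in bits 7:0, RP1 in 15:8, RPn (min) in 23:16 -- all 50 MHz units. */
static struct rps_caps decode_rp_state_cap(uint32_t cap)
{
        struct rps_caps c = {
                .rp0 = (cap >>  0) & 0xff,
                .rp1 = (cap >>  8) & 0xff,
                .rpn = (cap >> 16) & 0xff,
        };
        return c;
}

int main(void)
{
        struct rps_caps c = decode_rp_state_cap(0x000b0e16); /* sample */

        printf("RP0 %d MHz, RP1 %d MHz, RPn %d MHz\n",
               c.rp0 * 50, c.rp1 * 50, c.rpn * 50);
        /* -> RP0 1100 MHz, RP1 700 MHz, RPn 550 MHz */
        return 0;
}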
+
+static void gen9_enable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       uint32_t rc6_mask = 0;
+       int unused;
+
+       /* 1a: Software RC state - RC0 */
+       I915_WRITE(GEN6_RC_STATE, 0);
+
+       /* 1b: Get forcewake during program sequence. Although the driver
         * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-       /* 2a: Program RC6 thresholds.*/
-       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       /* 2a: Disable RC states. */
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       /* 2b: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
        I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
-       for_each_ring(ring, dev_priv, i)
+       for_each_ring(ring, dev_priv, unused)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
-       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       /* 3a: Enable RC6 */
+       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+       DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+                       "on" : "off");
+       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                                  GEN6_RC_CTL_EI_MODE(1) |
+                                  rc6_mask);
 
-       /* allows RC6 residency counter to work */
-       I915_WRITE(VLV_COUNTER_CONTROL,
-                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
-                                     VLV_MEDIA_RC6_COUNT_EN |
-                                     VLV_RENDER_RC6_COUNT_EN));
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
-       /* For now we assume BIOS is allocating and populating the PCBR  */
-       pcbr = I915_READ(VLV_PCBR);
+}
 
-       DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+static void gen8_enable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       uint32_t rc6_mask = 0;
+       int unused;
 
-       /* 3: Enable RC6 */
-       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
-                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
-               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+       /* 1a: Software RC state - RC0 */
+       I915_WRITE(GEN6_RC_STATE, 0);
 
-       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+       /* 1c & 1d: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* 2a: Disable RC states. */
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       /* Initialize rps frequencies */
+       gen6_init_rps_frequencies(dev);
+
+       /* 2b: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+       for_each_ring(ring, dev_priv, unused)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+       else
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+       /* 3: Enable RC6 */
+       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+       intel_print_rc6_info(dev, rc6_mask);
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN7_RC_CTL_TO_MODE |
+                               rc6_mask);
+       else
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN6_RC_CTL_EI_MODE(1) |
+                               rc6_mask);
 
        /* 4 Program defaults and thresholds for RPS*/
-       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-       I915_WRITE(GEN6_RP_UP_EI, 66000);
-       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+       I915_WRITE(GEN6_RPNSWREQ,
+                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+       I915_WRITE(GEN6_RC_VIDEO_FREQ,
+                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+       /* NB: Docs say 1s and 1000000, which aren't equivalent */
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       /* Docs recommend 900 MHz and 300 MHz, respectively */
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                  dev_priv->rps.max_freq_softlimit << 24 |
+                  dev_priv->rps.min_freq_softlimit << 16);
 
-       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
-       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
-       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+       I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
        /* 5: Enable RPS */
        I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
 
-       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
-       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
-       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-                        dev_priv->rps.cur_freq);
-
-       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
-
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+       /* 6: Ring frequency + overclocking (our driver does this later) */
 
-       gen8_enable_rps_interrupts(dev);
+       dev_priv->rps.power = HIGH_POWER; /* force a reset */
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
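
The divide-by-128 constants are easier to audit if you treat these registers
as counting 1.28 us ticks, which is what the in-line comments imply. A quick
arithmetic check of the three values programmed above (a sketch; the unit
interpretation is inferred from those comments):

#include <stdio.h>

/* These registers tick in 1.28 us units, so a value written as X/128 with
 * X in 10 ns units encodes X * 10 ns of wall-clock time. */
static double units_to_ms(int units)
{
        return units * 1.28 / 1000.0;
}

int main(void)
{
        printf("down timeout:   %.0f ms\n", units_to_ms(100000000 / 128)); /* 1000 */
        printf("up threshold:   %.0f ms\n", units_to_ms(7600000 / 128));   /* 76 */
        printf("down threshold: %.0f ms\n", units_to_ms(31300000 / 128));  /* 313 */
        return 0;
}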
 
-static void valleyview_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
-       u32 gtfifodbg, val, rc6_mode = 0;
-       int i;
+       u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+       u32 gtfifodbg;
+       int rc6_mode;
+       int i, ret;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       valleyview_check_pctx(dev_priv);
+       /* Here begins a magic sequence of register writes to enable
+        * auto-downclocking.
+        *
+        * Perhaps there might be some value in exposing these to
+        * userspace...
+        */
+       I915_WRITE(GEN6_RC_STATE, 0);
 
+       /* Clear the DBG now so we don't confuse earlier errors */
        if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
-                                gtfifodbg);
+               DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
 
-       /* If VLV, Forcewake all wells, else re-direct to regular path */
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-       I915_WRITE(GEN6_RP_UP_EI, 66000);
-       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
+       /* Initialize rps frequencies */
+       gen6_init_rps_frequencies(dev);
 
-       I915_WRITE(GEN6_RP_CONTROL,
-                  GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX |
-                  GEN6_RP_ENABLE |
-                  GEN6_RP_UP_BUSY_AVG |
-                  GEN6_RP_DOWN_IDLE_CONT);
+       /* disable the counters and set deterministic thresholds */
+       I915_WRITE(GEN6_RC_CONTROL, 0);
 
-       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+       I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+       I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
        I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
        for_each_ring(ring, dev_priv, i)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
-       I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+       I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+       if (IS_IVYBRIDGE(dev))
+               I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+       else
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+       I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+       I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       /* allows RC6 residency counter to work */
-       I915_WRITE(VLV_COUNTER_CONTROL,
-                  _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
-                                     VLV_RENDER_RC0_COUNT_EN |
-                                     VLV_MEDIA_RC6_COUNT_EN |
-                                     VLV_RENDER_RC6_COUNT_EN));
+       /* Check if we are enabling RC6 */
+       rc6_mode = intel_enable_rc6(dev_priv->dev);
+       if (rc6_mode & INTEL_RC6_ENABLE)
+               rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
-       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-               rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
+       /* We don't use those on Haswell */
+       if (!IS_HASWELL(dev)) {
+               if (rc6_mode & INTEL_RC6p_ENABLE)
+                       rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 
-       intel_print_rc6_info(dev, rc6_mode);
+               if (rc6_mode & INTEL_RC6pp_ENABLE)
+                       rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+       }
 
-       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+       intel_print_rc6_info(dev, rc6_mask);
 
-       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       I915_WRITE(GEN6_RC_CONTROL,
+                  rc6_mask |
+                  GEN6_RC_CTL_EI_MODE(1) |
+                  GEN6_RC_CTL_HW_ENABLE);
 
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
-       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+       /* Power down if completely idle for over 50ms */
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-                        dev_priv->rps.cur_freq);
+       ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+       if (ret)
+               DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 
-       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+       ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+       if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+               DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+                                (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
+                                (pcu_mbox & 0xff) * 50);
+               dev_priv->rps.max_freq = pcu_mbox & 0xff;
+       }
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+       dev_priv->rps.power = HIGH_POWER; /* force a reset */
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
-       gen6_enable_rps_interrupts(dev);
+       rc6vids = 0;
+       ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+       if (IS_GEN6(dev) && ret) {
+               DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+       } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+               DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+                         GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+               rc6vids &= 0xffff00;
+               rc6vids |= GEN6_ENCODE_RC6_VID(450);
+               ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+               if (ret)
+                       DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+       }
 
        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
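
The rc6vids fixup relies on a linear VID-to-millivolt mapping. Assuming
GEN6_ENCODE_RC6_VID/GEN6_DECODE_RC6_VID implement mv = vid * 5 + 245 and its
inverse (consistent with the 450 mV floor enforced above, though the exact
constants are an assumption here), the round trip looks like:

#include <stdio.h>

/* Assumed linear mapping, mirroring GEN6_{EN,DE}CODE_RC6_VID. */
static int decode_rc6_vid(int vid) { return vid * 5 + 245; }
static int encode_rc6_vid(int mv)  { return (mv - 245) / 5; }

int main(void)
{
        int low_vid = 0x20; /* sample BIOS value */

        printf("BIOS vid 0x%02x = %d mV\n", low_vid, decode_rc6_vid(low_vid));
        if (decode_rc6_vid(low_vid) < 450) /* 405 mV: too low */
                low_vid = encode_rc6_vid(450);
        printf("fixed vid 0x%02x = %d mV\n", low_vid, decode_rc6_vid(low_vid));
        return 0;
}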
 
-void ironlake_teardown_rc6(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int min_freq = 15;
+       unsigned int gpu_freq;
+       unsigned int max_ia_freq, min_ring_freq;
+       int scaling_factor = 180;
+       struct cpufreq_policy *policy;
 
-       if (dev_priv->ips.renderctx) {
-               i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
-               drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
-               dev_priv->ips.renderctx = NULL;
-       }
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (dev_priv->ips.pwrctx) {
-               i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
-               drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
-               dev_priv->ips.pwrctx = NULL;
+       policy = cpufreq_cpu_get(0);
+       if (policy) {
+               max_ia_freq = policy->cpuinfo.max_freq;
+               cpufreq_cpu_put(policy);
+       } else {
+               /*
+                * Default to measured freq if none found; the PCU will
+                * ensure we don't go over.
+                */
+               max_ia_freq = tsc_khz;
        }
-}
 
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /* Convert from kHz to MHz */
+       max_ia_freq /= 1000;
 
-       if (I915_READ(PWRCTXA)) {
-               /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-               wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-                        50);
+       min_ring_freq = I915_READ(DCLK) & 0xf;
+       /* convert DDR frequency from units of 266.6MHz to bandwidth */
+       min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
-               I915_WRITE(PWRCTXA, 0);
-               POSTING_READ(PWRCTXA);
+       /*
+        * For each potential GPU frequency, load a ring frequency we'd like
+        * to use for memory access.  We do this by specifying the IA frequency
+        * the PCU should use as a reference to determine the ring frequency.
+        */
+       for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
+            gpu_freq--) {
+               int diff = dev_priv->rps.max_freq - gpu_freq;
+               unsigned int ia_freq = 0, ring_freq = 0;
 
-               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-               POSTING_READ(RSTDBYCTL);
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       /* max(2 * GT, DDR). NB: GT is 50MHz units */
+                       ring_freq = max(min_ring_freq, gpu_freq);
+               } else if (IS_HASWELL(dev)) {
+                       ring_freq = mult_frac(gpu_freq, 5, 4);
+                       ring_freq = max(min_ring_freq, ring_freq);
+                       /* leave ia_freq as the default, chosen by cpufreq */
+               } else {
+                       /* On older processors, there is no separate ring
+                        * clock domain, so in order to boost the bandwidth
+                        * of the ring, we need to upclock the CPU (ia_freq).
+                        *
+                        * For GPU frequencies less than 750MHz,
+                        * just use the lowest ring freq.
+                        */
+                       if (gpu_freq < min_freq)
+                               ia_freq = 800;
+                       else
+                               ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+                       ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+               }
+
+               sandybridge_pcode_write(dev_priv,
+                                       GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+                                       ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+                                       ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+                                       gpu_freq);
        }
 }
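
For the pre-Haswell branch a worked example makes the scaling rule concrete.
With invented inputs (max IA frequency 3400 MHz, GPU codes 22 down to 11, i.e.
1100..550 MHz), the loop produces a table like this sketch, with
DIV_ROUND_CLOSEST inlined:

#include <stdio.h>

int main(void)
{
        const int max_ia_freq = 3400;           /* MHz, sample value */
        const int scaling_factor = 180;
        const int max_code = 22, min_code = 11; /* 1100..550 MHz */

        for (int gpu_freq = max_code; gpu_freq >= min_code; gpu_freq -= 4) {
                int diff = max_code - gpu_freq;
                int ia_freq;

                if (gpu_freq < 15) /* below "min_freq": lowest ring freq */
                        ia_freq = 800;
                else
                        ia_freq = max_ia_freq - (diff * scaling_factor) / 2;

                ia_freq = (ia_freq + 50) / 100; /* DIV_ROUND_CLOSEST(x, 100) */
                printf("gpu %4d MHz -> IA ratio %2d (~%d MHz)\n",
                       gpu_freq * 50, ia_freq, ia_freq * 100);
        }
        return 0;
}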
 
-static int ironlake_setup_rc6(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->ips.renderctx == NULL)
-               dev_priv->ips.renderctx = intel_alloc_context_page(dev);
-       if (!dev_priv->ips.renderctx)
-               return -ENOMEM;
-
-       if (dev_priv->ips.pwrctx == NULL)
-               dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
-       if (!dev_priv->ips.pwrctx) {
-               ironlake_teardown_rc6(dev);
-               return -ENOMEM;
-       }
+       if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+               return;
 
-       return 0;
+       mutex_lock(&dev_priv->rps.hw_lock);
+       __gen6_update_ring_freq(dev);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void ironlake_enable_rc6(struct drm_device *dev)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-       bool was_interruptible;
-       int ret;
+       u32 val, rp0;
 
-       /* rc6 disabled by default due to repeated reports of hanging during
-        * boot and resume.
-        */
-       if (!intel_enable_rc6(dev))
-               return;
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       return rp0;
+}
 
-       ret = ironlake_setup_rc6(dev);
-       if (ret)
-               return;
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpe;
 
-       was_interruptible = dev_priv->mm.interruptible;
-       dev_priv->mm.interruptible = false;
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+       rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
 
-       /*
-        * GPU can automatically power down the render unit if given a page
-        * to save state.
-        */
-       ret = intel_ring_begin(ring, 6);
-       if (ret) {
-               ironlake_teardown_rc6(dev);
-               dev_priv->mm.interruptible = was_interruptible;
-               return;
-       }
+       return rpe;
+}
 
-       intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-       intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
-                       MI_MM_SPACE_GTT |
-                       MI_SAVE_EXT_STATE_EN |
-                       MI_RESTORE_EXT_STATE_EN |
-                       MI_RESTORE_INHIBIT);
-       intel_ring_emit(ring, MI_SUSPEND_FLUSH);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_emit(ring, MI_FLUSH);
-       intel_ring_advance(ring);
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rp1;
 
-       /*
-        * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
-        * does an implicit flush, combined with MI_FLUSH above, it should be
-        * safe to assume that renderctx is valid
-        */
-       ret = intel_ring_idle(ring);
-       dev_priv->mm.interruptible = was_interruptible;
-       if (ret) {
-               DRM_ERROR("failed to enable ironlake power savings\n");
-               ironlake_teardown_rc6(dev);
-               return;
-       }
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
 
-       I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
-       I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+       return rp1;
+}
 
-       intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpn;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+       return rpn;
 }
 
-static unsigned long intel_pxfreq(u32 vidfreq)
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
 {
-       unsigned long freq;
-       int div = (vidfreq & 0x3f0000) >> 16;
-       int post = (vidfreq & 0x3000) >> 12;
-       int pre = (vidfreq & 0x7);
+       u32 val, rp1;
 
-       if (!pre)
-               return 0;
+       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
 
-       freq = ((div * 133333) / ((1<<post) * pre));
+       rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
 
-       return freq;
+       return rp1;
 }
 
-static const struct cparams {
-       u16 i;
-       u16 t;
-       u16 m;
-       u16 c;
-} cparams[] = {
-       { 1, 1333, 301, 28664 },
-       { 1, 1066, 294, 24460 },
-       { 1, 800, 294, 25192 },
-       { 0, 1333, 276, 27605 },
-       { 0, 1066, 276, 27605 },
-       { 0, 800, 231, 23784 },
-};
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rp0;
 
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
+       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+       rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+       /* Clamp to max */
+       rp0 = min_t(u32, rp0, 0xea);
+
+       return rp0;
+}
+
+static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
 {
-       u64 total_count, diff, ret;
-       u32 count1, count2, count3, m = 0, c = 0;
-       unsigned long now = jiffies_to_msecs(jiffies), diff1;
-       int i;
+       u32 val, rpe;
 
-       assert_spin_locked(&mchdev_lock);
+       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+       rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+       rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
 
-       diff1 = now - dev_priv->ips.last_time1;
+       return rpe;
+}
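
valleyview_rps_rpe_freq is the one reader here that assembles its value from
two fuse registers: the low bits of the RPe code from one, the high bits
shifted in at bit 5 from the other. A sketch with invented register contents
and simplified masks (the real ones live in i915_reg.h):

#include <stdio.h>

#define FREQ_LO_MASK 0x1f /* low 5 bits of the RPe code */

/* Combine a fuse field split across two registers: LO holds bits 4:0,
 * HI supplies the remaining bits, placed at bit 5 and up. */
static unsigned int combine_rpe(unsigned int fuse_lo, unsigned int fuse_hi)
{
        unsigned int rpe = fuse_lo & FREQ_LO_MASK;

        rpe |= (fuse_hi & 0x7) << 5;
        return rpe;
}

int main(void)
{
        /* Invented fuse values: LO bits = 0x0b, HI bits = 0x2. */
        printf("RPe code: %u\n", combine_rpe(0x0b, 0x2)); /* 75 */
        return 0;
}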
 
-       /* Prevent division-by-zero if we are asking too fast.
-        * Also, we don't get interesting results if we are polling
-        * faster than once in 10ms, so just return the saved value
-        * in such cases.
-        */
-       if (diff1 <= 10)
-               return dev_priv->ips.chipset_power;
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+       return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+}
 
-       count1 = I915_READ(DMIEC);
-       count2 = I915_READ(DDREC);
-       count3 = I915_READ(CSIEC);
+/* Check that the pctx buffer wasn't moved under us. */
+static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+{
+       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
 
-       total_count = count1 + count2 + count3;
+       WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+                            dev_priv->vlv_pctx->stolen->start);
+}
 
-       /* FIXME: handle per-counter overflow */
-       if (total_count < dev_priv->ips.last_count1) {
-               diff = ~0UL - dev_priv->ips.last_count1;
-               diff += total_count;
-       } else {
-               diff = total_count - dev_priv->ips.last_count1;
-       }
 
-       for (i = 0; i < ARRAY_SIZE(cparams); i++) {
-               if (cparams[i].i == dev_priv->ips.c_m &&
-                   cparams[i].t == dev_priv->ips.r_t) {
-                       m = cparams[i].m;
-                       c = cparams[i].c;
-                       break;
-               }
-       }
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
 
-       diff = div_u64(diff, diff1);
-       ret = ((m * diff) + c);
-       ret = div_u64(ret, 10);
+       WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
 
-       dev_priv->ips.last_count1 = total_count;
-       dev_priv->ips.last_time1 = now;
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long pctx_paddr, paddr;
+       struct i915_gtt *gtt = &dev_priv->gtt;
+       u32 pcbr;
+       int pctx_size = 32*1024;
 
-       dev_priv->ips.chipset_power = ret;
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       return ret;
+       pcbr = I915_READ(VLV_PCBR);
+       if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+               DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+               paddr = (dev_priv->mm.stolen_base +
+                        (gtt->stolen_size - pctx_size));
+
+               pctx_paddr = (paddr & (~4095));
+               I915_WRITE(VLV_PCBR, pctx_paddr);
+       }
+
+       DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 }
 
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static void valleyview_setup_pctx(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       unsigned long val;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *pctx;
+       unsigned long pctx_paddr;
+       u32 pcbr;
+       int pctx_size = 24*1024;
 
-       if (INTEL_INFO(dev)->gen != 5)
-               return 0;
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       spin_lock_irq(&mchdev_lock);
+       pcbr = I915_READ(VLV_PCBR);
+       if (pcbr) {
+               /* BIOS set it up already, grab the pre-alloc'd space */
+               int pcbr_offset;
 
-       val = __i915_chipset_val(dev_priv);
+               pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+               pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+                                                                     pcbr_offset,
+                                                                     I915_GTT_OFFSET_NONE,
+                                                                     pctx_size);
+               goto out;
+       }
 
-       spin_unlock_irq(&mchdev_lock);
+       DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
 
-       return val;
+       /*
+        * From the Gunit register HAS:
+        * The Gfx driver is expected to program this register and ensure
+        * proper allocation within Gfx stolen memory.  For example, this
+        * register should be programmed such that the PCBR range does not
+        * overlap with other ranges, such as the frame buffer, protected
+        * memory, or any other relevant ranges.
+        */
+       pctx = i915_gem_object_create_stolen(dev, pctx_size);
+       if (!pctx) {
+               DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+               return;
+       }
+
+       pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+       I915_WRITE(VLV_PCBR, pctx_paddr);
+
+out:
+       DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
+       dev_priv->vlv_pctx = pctx;
 }
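
The recurring "& (~4095)" is plain 4 KiB alignment: VLV_PCBR stores a
page-aligned physical address, so the low 12 bits are cleared before the write
and stripped again on the read side. A short sanity check with an arbitrary
sample address:

#include <stdio.h>

int main(void)
{
        unsigned long long paddr = 0x7ffe1234;       /* arbitrary sample */
        unsigned long long pcbr  = paddr & ~4095ull; /* round down to 4 KiB */

        printf("0x%llx -> 0x%llx (offset 0x%llx dropped)\n",
               paddr, pcbr, paddr & 4095);
        return 0;
}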
 
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+static void valleyview_cleanup_pctx(struct drm_device *dev)
 {
-       unsigned long m, x, b;
-       u32 tsfs;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       tsfs = I915_READ(TSFS);
+       if (WARN_ON(!dev_priv->vlv_pctx))
+               return;
 
-       m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
-       x = I915_READ8(TR1);
+       drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+       dev_priv->vlv_pctx = NULL;
+}
 
-       b = tsfs & TSFS_INTR_MASK;
+static void valleyview_init_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
 
-       return ((m * x) / 127) - b;
+       valleyview_setup_pctx(dev);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       switch ((val >> 6) & 3) {
+       case 0:
+       case 1:
+               dev_priv->mem_freq = 800;
+               break;
+       case 2:
+               dev_priv->mem_freq = 1066;
+               break;
+       case 3:
+               dev_priv->mem_freq = 1333;
+               break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+
+       dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
+
+       dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+
+       dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+                        dev_priv->rps.rp1_freq);
+
+       dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
+
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static void cherryview_init_gt_powersave(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       static const struct v_table {
-               u16 vd; /* in .1 mil */
-               u16 vm; /* in .1 mil */
-       } v_table[] = {
-               { 0, 0, },
-               { 375, 0, },
-               { 500, 0, },
-               { 625, 0, },
-               { 750, 0, },
-               { 875, 0, },
-               { 1000, 0, },
-               { 1125, 0, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4250, 3125, },
-               { 4375, 3250, },
-               { 4500, 3375, },
-               { 4625, 3500, },
-               { 4750, 3625, },
-               { 4875, 3750, },
-               { 5000, 3875, },
-               { 5125, 4000, },
-               { 5250, 4125, },
-               { 5375, 4250, },
-               { 5500, 4375, },
-               { 5625, 4500, },
-               { 5750, 4625, },
-               { 5875, 4750, },
-               { 6000, 4875, },
-               { 6125, 5000, },
-               { 6250, 5125, },
-               { 6375, 5250, },
-               { 6500, 5375, },
-               { 6625, 5500, },
-               { 6750, 5625, },
-               { 6875, 5750, },
-               { 7000, 5875, },
-               { 7125, 6000, },
-               { 7250, 6125, },
-               { 7375, 6250, },
-               { 7500, 6375, },
-               { 7625, 6500, },
-               { 7750, 6625, },
-               { 7875, 6750, },
-               { 8000, 6875, },
-               { 8125, 7000, },
-               { 8250, 7125, },
-               { 8375, 7250, },
-               { 8500, 7375, },
-               { 8625, 7500, },
-               { 8750, 7625, },
-               { 8875, 7750, },
-               { 9000, 7875, },
-               { 9125, 8000, },
-               { 9250, 8125, },
-               { 9375, 8250, },
-               { 9500, 8375, },
-               { 9625, 8500, },
-               { 9750, 8625, },
-               { 9875, 8750, },
-               { 10000, 8875, },
-               { 10125, 9000, },
-               { 10250, 9125, },
-               { 10375, 9250, },
-               { 10500, 9375, },
-               { 10625, 9500, },
-               { 10750, 9625, },
-               { 10875, 9750, },
-               { 11000, 9875, },
-               { 11125, 10000, },
-               { 11250, 10125, },
-               { 11375, 10250, },
-               { 11500, 10375, },
-               { 11625, 10500, },
-               { 11750, 10625, },
-               { 11875, 10750, },
-               { 12000, 10875, },
-               { 12125, 11000, },
-               { 12250, 11125, },
-               { 12375, 11250, },
-               { 12500, 11375, },
-               { 12625, 11500, },
-               { 12750, 11625, },
-               { 12875, 11750, },
-               { 13000, 11875, },
-               { 13125, 12000, },
-               { 13250, 12125, },
-               { 13375, 12250, },
-               { 13500, 12375, },
-               { 13625, 12500, },
-               { 13750, 12625, },
-               { 13875, 12750, },
-               { 14000, 12875, },
-               { 14125, 13000, },
-               { 14250, 13125, },
-               { 14375, 13250, },
-               { 14500, 13375, },
-               { 14625, 13500, },
-               { 14750, 13625, },
-               { 14875, 13750, },
-               { 15000, 13875, },
-               { 15125, 14000, },
-               { 15250, 14125, },
-               { 15375, 14250, },
-               { 15500, 14375, },
-               { 15625, 14500, },
-               { 15750, 14625, },
-               { 15875, 14750, },
-               { 16000, 14875, },
-               { 16125, 15000, },
-       };
-       if (INTEL_INFO(dev)->is_mobile)
-               return v_table[pxvid].vm;
-       else
-               return v_table[pxvid].vd;
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
 
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
-       u64 now, diff, diffms;
-       u32 count;
+       cherryview_setup_pctx(dev);
 
-       assert_spin_locked(&mchdev_lock);
+       mutex_lock(&dev_priv->rps.hw_lock);
 
-       now = ktime_get_raw_ns();
-       diffms = now - dev_priv->ips.last_time2;
-       do_div(diffms, NSEC_PER_MSEC);
+       mutex_lock(&dev_priv->dpio_lock);
+       val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+       mutex_unlock(&dev_priv->dpio_lock);
 
-       /* Don't divide by 0 */
-       if (!diffms)
-               return;
+       switch ((val >> 2) & 0x7) {
+       case 0:
+       case 1:
+               dev_priv->rps.cz_freq = 200;
+               dev_priv->mem_freq = 1600;
+               break;
+       case 2:
+               dev_priv->rps.cz_freq = 267;
+               dev_priv->mem_freq = 1600;
+               break;
+       case 3:
+               dev_priv->rps.cz_freq = 333;
+               dev_priv->mem_freq = 2000;
+               break;
+       case 4:
+               dev_priv->rps.cz_freq = 320;
+               dev_priv->mem_freq = 1600;
+               break;
+       case 5:
+               dev_priv->rps.cz_freq = 400;
+               dev_priv->mem_freq = 1600;
+               break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
-       count = I915_READ(GFXEC);
+       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
 
-       if (count < dev_priv->ips.last_count2) {
-               diff = ~0UL - dev_priv->ips.last_count2;
-               diff += count;
-       } else {
-               diff = count - dev_priv->ips.last_count2;
-       }
+       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
 
-       dev_priv->ips.last_count2 = count;
-       dev_priv->ips.last_time2 = now;
+       dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+                        dev_priv->rps.rp1_freq);
 
-       /* More magic constants... */
-       diff = diff * 1181;
-       diff = div_u64(diff, diffms * 10);
-       dev_priv->ips.gfx_power = diff;
-}
+       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
 
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
+       WARN_ONCE((dev_priv->rps.max_freq |
+                  dev_priv->rps.efficient_freq |
+                  dev_priv->rps.rp1_freq |
+                  dev_priv->rps.min_freq) & 1,
+                 "Odd GPU freq values\n");
 
-       if (INTEL_INFO(dev)->gen != 5)
-               return;
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
-       spin_lock_irq(&mchdev_lock);
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 
-       __i915_update_gfx_val(dev_priv);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
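
The CCK fuse switch above is a lookup table in disguise; writing it as one
makes the fuse-to-frequency mapping easier to audit. The frequencies below are
copied from the switch; the table form and the sample fuse value are mine:

#include <stdio.h>

struct chv_freqs { int cz_mhz; int ddr_mhz; };

/* Fuse field ((val >> 2) & 0x7) -> CZ clock / DDR speed, per the switch. */
static const struct chv_freqs chv_fuse_table[] = {
        [0] = { 200, 1600 },
        [1] = { 200, 1600 },
        [2] = { 267, 1600 },
        [3] = { 333, 2000 },
        [4] = { 320, 1600 },
        [5] = { 400, 1600 },
};

int main(void)
{
        unsigned int cck_fuse = 0x0c; /* sample raw fuse value */
        unsigned int idx = (cck_fuse >> 2) & 0x7;

        if (idx < sizeof(chv_fuse_table) / sizeof(chv_fuse_table[0]))
                printf("cz %d MHz, DDR %d MHz\n",
                       chv_fuse_table[idx].cz_mhz,
                       chv_fuse_table[idx].ddr_mhz);
        return 0;
}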
 
-       spin_unlock_irq(&mchdev_lock);
+static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+{
+       valleyview_cleanup_pctx(dev);
 }
 
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rps(struct drm_device *dev)
 {
-       unsigned long t, corr, state1, corr2, state2;
-       u32 pxvid, ext_v;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+       int i;
 
-       assert_spin_locked(&mchdev_lock);
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
-       pxvid = (pxvid >> 24) & 0x7f;
-       ext_v = pvid_to_extvid(dev_priv, pxvid);
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
+               I915_WRITE(GTFIFODBG, gtfifodbg);
+       }
 
-       state1 = ext_v;
+       cherryview_check_pctx(dev_priv);
 
-       t = i915_mch_val(dev_priv);
+       /* 1a & 1b: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-       /* Revel in the empirically derived constants */
+       /* 2a: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-       /* Correction factor in 1/100000 units */
-       if (t > 80)
-               corr = ((t * 2349) + 135940);
-       else if (t >= 50)
-               corr = ((t * 964) + 29317);
-       else /* < 50 */
-               corr = ((t * 301) + 1004);
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
 
-       corr = corr * ((150142 * state1) / 10000 - 78642);
-       corr /= 100000;
-       corr2 = (corr * dev_priv->ips.corr);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
-       state2 = (corr2 * state1) / 10000;
-       state2 /= 100; /* convert to mW */
+       /* allows RC6 residency counter to work */
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
 
-       __i915_update_gfx_val(dev_priv);
+       /* For now we assume BIOS is allocating and populating the PCBR */
+       pcbr = I915_READ(VLV_PCBR);
 
-       return dev_priv->ips.gfx_power + state2;
-}
+       /* 3: Enable RC6 */
+       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
+               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
 
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       unsigned long val;
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
-       if (INTEL_INFO(dev)->gen != 5)
-               return 0;
+       /* 4: Program defaults and thresholds for RPS */
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
-       spin_lock_irq(&mchdev_lock);
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       val = __i915_gfx_val(dev_priv);
+       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
 
-       spin_unlock_irq(&mchdev_lock);
+       /* 5: Enable RPS */
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw?) */
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
 
-       return val;
-}
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
-       struct drm_i915_private *dev_priv;
-       unsigned long chipset_val, graphics_val, ret = 0;
+       /* RPS code assumes GPLL is used */
+       WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev)
-               goto out_unlock;
-       dev_priv = i915_mch_dev;
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       chipset_val = __i915_chipset_val(dev_priv);
-       graphics_val = __i915_gfx_val(dev_priv);
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
 
-       ret = chipset_val + graphics_val;
+       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
-       return ret;
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
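
/*
 * A minimal stand-alone sketch of the frequency decode performed above:
 * bits 15:8 of PUNIT_REG_GPU_FREQ_STS carry the current frequency code,
 * which vlv_gpu_freq() scales to MHz. The conversion step below is a
 * hypothetical stand-in; the real factor depends on the platform clock.
 */
#include <stdint.h>
#include <stdio.h>

static int sketch_code_to_mhz(uint8_t code)
{
        return (code * 625) / 100;      /* assumed 6.25 MHz per code step */
}

int main(void)
{
        uint32_t freq_sts = 0x00004a00;         /* example register value */
        uint8_t code = (freq_sts >> 8) & 0xff;  /* same decode as above */

        printf("freq code %u -> ~%d MHz\n", code, sketch_code_to_mhz(code));
        return 0;
}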
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
 
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
+static void valleyview_enable_rps(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       u32 gtfifodbg, val, rc6_mode = 0;
+       int i;
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
-               dev_priv->ips.max_delay--;
+       valleyview_check_pctx(dev_priv);
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
+               I915_WRITE(GTFIFODBG, gtfifodbg);
+       }
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
+       /* If VLV, forcewake all wells, else redirect to the regular path */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
-       if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
-               dev_priv->ips.max_delay++;
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX |
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_CONT);
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
-/**
- * i915_gpu_busy - indicate GPU busyness to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
-       struct drm_i915_private *dev_priv;
-       struct intel_engine_cs *ring;
-       bool ret = false;
-       int i;
+       I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev)
-               goto out_unlock;
-       dev_priv = i915_mch_dev;
+       /* allows RC6 residency counter to work */
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+                                     VLV_RENDER_RC0_COUNT_EN |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
 
-       for_each_ring(ring, dev_priv, i)
-               ret |= !list_empty(&ring->request_list);
+       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       intel_print_rc6_info(dev, rc6_mode);
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
-       struct drm_i915_private *dev_priv;
-       bool ret = true;
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
-       spin_lock_irq(&mchdev_lock);
-       if (!i915_mch_dev) {
-               ret = false;
-               goto out_unlock;
-       }
-       dev_priv = i915_mch_dev;
+       /* RPS code assumes GPLL is used */
+       WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-       dev_priv->ips.max_delay = dev_priv->ips.fstart;
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
-               ret = false;
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
 
-out_unlock:
-       spin_unlock_irq(&mchdev_lock);
+       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
 
-       return ret;
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
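
/*
 * Both enable paths above write VLV_COUNTER_CONTROL through
 * _MASKED_BIT_ENABLE(). A minimal sketch of the masked-register idiom,
 * assuming the usual i915 convention that the high 16 bits of a write
 * select which of the low 16 bits the hardware actually updates:
 */
#include <stdint.h>

static inline uint32_t masked_bit_enable(uint32_t bits)
{
        return (bits << 16) | bits;     /* select the bits and set them */
}

static inline uint32_t masked_bit_disable(uint32_t bits)
{
        return bits << 16;              /* select the bits, leave them clear */
}

/* model of the hardware's side: only the selected bits change */
static inline uint32_t masked_write(uint32_t old, uint32_t wr)
{
        uint32_t mask = wr >> 16;

        return (old & ~mask) | (wr & mask & 0xffff);
}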
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
+void ironlake_teardown_rc6(struct drm_device *dev)
 {
-       void (*link)(void);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       link = symbol_get(ips_link_to_i915_driver);
-       if (link) {
-               link();
-               symbol_put(ips_link_to_i915_driver);
+       if (dev_priv->ips.renderctx) {
+               i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
+               drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+               dev_priv->ips.renderctx = NULL;
+       }
+
+       if (dev_priv->ips.pwrctx) {
+               i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
+               drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+               dev_priv->ips.pwrctx = NULL;
        }
 }
 
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+static void ironlake_disable_rc6(struct drm_device *dev)
 {
-       /* We only register the i915 ips part with intel-ips once everything is
-        * set up, to avoid intel-ips sneaking in and reading bogus values. */
-       spin_lock_irq(&mchdev_lock);
-       i915_mch_dev = dev_priv;
-       spin_unlock_irq(&mchdev_lock);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ips_ping_for_i915_load();
-}
+       if (I915_READ(PWRCTXA)) {
+               /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+               wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+                        50);
 
-void intel_gpu_ips_teardown(void)
-{
-       spin_lock_irq(&mchdev_lock);
-       i915_mch_dev = NULL;
-       spin_unlock_irq(&mchdev_lock);
+               I915_WRITE(PWRCTXA, 0);
+               POSTING_READ(PWRCTXA);
+
+               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+               POSTING_READ(RSTDBYCTL);
+       }
 }
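
/*
 * The wait_for(..., 50) above polls a condition for up to 50 ms. A
 * stand-alone analog of that pattern, assuming CLOCK_MONOTONIC is
 * available (the kernel macro also sleeps between polls and re-checks
 * the condition once after the deadline):
 */
#include <stdbool.h>
#include <time.h>

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
        return (b->tv_sec - a->tv_sec) * 1000 +
               (b->tv_nsec - a->tv_nsec) / 1000000;
}

static bool poll_until(bool (*cond)(void), long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (cond())
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while (elapsed_ms(&start, &now) < timeout_ms);

        return cond();  /* one final check after the deadline */
}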
 
-static void intel_init_emon(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 lcfuse;
-       u8 pxw[16];
-       int i;
-
-       /* Disable to program */
-       I915_WRITE(ECR, 0);
-       POSTING_READ(ECR);
-
-       /* Program energy weights for various events */
-       I915_WRITE(SDEW, 0x15040d00);
-       I915_WRITE(CSIEW0, 0x007f0000);
-       I915_WRITE(CSIEW1, 0x1e220004);
-       I915_WRITE(CSIEW2, 0x04000004);
-
-       for (i = 0; i < 5; i++)
-               I915_WRITE(PEW + (i * 4), 0);
-       for (i = 0; i < 3; i++)
-               I915_WRITE(DEW + (i * 4), 0);
 
-       /* Program P-state weights to account for frequency power adjustment */
-       for (i = 0; i < 16; i++) {
-               u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
-               unsigned long freq = intel_pxfreq(pxvidfreq);
-               unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
-                       PXVFREQ_PX_SHIFT;
-               unsigned long val;
+       if (dev_priv->ips.renderctx == NULL)
+               dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+       if (!dev_priv->ips.renderctx)
+               return -ENOMEM;
 
-               val = vid * vid;
-               val *= (freq / 1000);
-               val *= 255;
-               val /= (127*127*900);
-               if (val > 0xff)
-                       DRM_ERROR("bad pxval: %ld\n", val);
-               pxw[i] = val;
+       if (dev_priv->ips.pwrctx == NULL)
+               dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+       if (!dev_priv->ips.pwrctx) {
+               ironlake_teardown_rc6(dev);
+               return -ENOMEM;
        }
-       /* Render standby states get 0 weight */
-       pxw[14] = 0;
-       pxw[15] = 0;
 
-       for (i = 0; i < 4; i++) {
-               u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
-                       (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-               I915_WRITE(PXW + (i * 4), val);
-       }
+       return 0;
+}
 
-       /* Adjust magic regs to magic values (more experimental results) */
-       I915_WRITE(OGW0, 0);
-       I915_WRITE(OGW1, 0);
-       I915_WRITE(EG0, 0x00007f00);
-       I915_WRITE(EG1, 0x0000000e);
-       I915_WRITE(EG2, 0x000e0000);
-       I915_WRITE(EG3, 0x68000300);
-       I915_WRITE(EG4, 0x42000000);
-       I915_WRITE(EG5, 0x00140031);
-       I915_WRITE(EG6, 0);
-       I915_WRITE(EG7, 0);
+static void ironlake_enable_rc6(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       bool was_interruptible;
+       int ret;
 
-       for (i = 0; i < 8; i++)
-               I915_WRITE(PXWL + (i * 4), 0);
+       /* RC6 is disabled by default here due to repeated reports of hangs
+        * during boot and resume.
+        */
+       if (!intel_enable_rc6(dev))
+               return;
 
-       /* Enable PMON + select events */
-       I915_WRITE(ECR, 0x80000019);
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       lcfuse = I915_READ(LCFUSE02);
+       ret = ironlake_setup_rc6(dev);
+       if (ret)
+               return;
 
-       dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
+       was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
 
-void intel_init_gt_powersave(struct drm_device *dev)
-{
-       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+       /*
+        * GPU can automatically power down the render unit if given a page
+        * to save state.
+        */
+       ret = intel_ring_begin(ring, 6);
+       if (ret) {
+               ironlake_teardown_rc6(dev);
+               dev_priv->mm.interruptible = was_interruptible;
+               return;
+       }
 
-       if (IS_CHERRYVIEW(dev))
-               cherryview_init_gt_powersave(dev);
-       else if (IS_VALLEYVIEW(dev))
-               valleyview_init_gt_powersave(dev);
-}
+       intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+       intel_ring_emit(ring, MI_SET_CONTEXT);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
+                       MI_MM_SPACE_GTT |
+                       MI_SAVE_EXT_STATE_EN |
+                       MI_RESTORE_EXT_STATE_EN |
+                       MI_RESTORE_INHIBIT);
+       intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(ring, MI_FLUSH);
+       intel_ring_advance(ring);
 
-void intel_cleanup_gt_powersave(struct drm_device *dev)
-{
-       if (IS_CHERRYVIEW(dev))
+       /*
+        * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+        * does an implicit flush; combined with the MI_FLUSH above, it should
+        * be safe to assume that renderctx is valid.
+        */
+       ret = intel_ring_idle(ring);
+       dev_priv->mm.interruptible = was_interruptible;
+       if (ret) {
+               DRM_ERROR("failed to enable ironlake power savings\n");
+               ironlake_teardown_rc6(dev);
                return;
-       else if (IS_VALLEYVIEW(dev))
-               valleyview_cleanup_gt_powersave(dev);
+       }
+
+       I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
+       I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+       intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
 }
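
/*
 * The intel_ring_begin()/intel_ring_emit()/intel_ring_advance() triplet
 * above follows a reserve/fill/commit contract: begin() guarantees room
 * for N dwords, each emit() writes one, and advance() publishes the new
 * tail. A toy model of that contract (the real code wraps, waits for
 * space, and writes the tail to a hardware register):
 */
#include <stdint.h>
#include <stddef.h>

#define TOY_RING_DWORDS 1024

struct toy_ring {
        uint32_t buf[TOY_RING_DWORDS];
        size_t tail;            /* next free slot */
        size_t reserved;        /* dwords promised by begin() */
};

static int toy_ring_begin(struct toy_ring *r, size_t ndwords)
{
        if (r->tail + ndwords > TOY_RING_DWORDS)
                return -1;      /* real driver waits or wraps instead */
        r->reserved = ndwords;
        return 0;
}

static void toy_ring_emit(struct toy_ring *r, uint32_t dw)
{
        r->buf[r->tail++] = dw; /* begin() must have reserved this slot */
        r->reserved--;
}

static void toy_ring_advance(struct toy_ring *r)
{
        (void)r;                /* real driver writes r->tail to RING_TAIL */
}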
 
-/**
- * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev: drm device
- *
- * We don't want to disable RC6 or other features here, we just want
- * to make sure any work we've queued has finished and won't bother
- * us while we're suspended.
- */
-void intel_suspend_gt_powersave(struct drm_device *dev)
+static unsigned long intel_pxfreq(u32 vidfreq)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* Interrupts should be disabled already to avoid re-arming. */
-       WARN_ON(intel_irqs_enabled(dev_priv));
+       unsigned long freq;
+       int div = (vidfreq & 0x3f0000) >> 16;
+       int post = (vidfreq & 0x3000) >> 12;
+       int pre = (vidfreq & 0x7);
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+       if (!pre)
+               return 0;
 
-       cancel_work_sync(&dev_priv->rps.work);
+       freq = ((div * 133333) / ((1<<post) * pre));
 
-       /* Force GPU to min freq during suspend */
-       gen6_rps_idle(dev_priv);
+       return freq;
 }
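
/*
 * Worked example for the decode above, assuming vidfreq = 0x00121001:
 * div = 0x12 (18), post = 1, pre = 1, so
 * freq = (18 * 133333) / ((1 << 1) * 1) = 1199997, i.e. roughly 1.2 GHz
 * if the 133333 factor is read as a 133.333 MHz reference expressed in kHz.
 */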
 
-void intel_disable_gt_powersave(struct drm_device *dev)
+static const struct cparams {
+       u16 i;
+       u16 t;
+       u16 m;
+       u16 c;
+} cparams[] = {
+       { 1, 1333, 301, 28664 },
+       { 1, 1066, 294, 24460 },
+       { 1, 800, 294, 25192 },
+       { 0, 1333, 276, 27605 },
+       { 0, 1066, 276, 27605 },
+       { 0, 800, 231, 23784 },
+};
+
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       u64 total_count, diff, ret;
+       u32 count1, count2, count3, m = 0, c = 0;
+       unsigned long now = jiffies_to_msecs(jiffies), diff1;
+       int i;
 
-       /* Interrupts should be disabled already to avoid re-arming. */
-       WARN_ON(intel_irqs_enabled(dev_priv));
+       assert_spin_locked(&mchdev_lock);
 
-       if (IS_IRONLAKE_M(dev)) {
-               ironlake_disable_drps(dev);
-               ironlake_disable_rc6(dev);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               intel_suspend_gt_powersave(dev);
+       diff1 = now - dev_priv->ips.last_time1;
 
-               mutex_lock(&dev_priv->rps.hw_lock);
-               if (IS_CHERRYVIEW(dev))
-                       cherryview_disable_rps(dev);
-               else if (IS_VALLEYVIEW(dev))
-                       valleyview_disable_rps(dev);
-               else
-                       gen6_disable_rps(dev);
-               dev_priv->rps.enabled = false;
-               mutex_unlock(&dev_priv->rps.hw_lock);
-       }
-}
+       /* Prevent division-by-zero if we are queried too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->ips.chipset_power;
 
-static void intel_gen6_powersave_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private,
-                            rps.delayed_resume_work.work);
-       struct drm_device *dev = dev_priv->dev;
+       count1 = I915_READ(DMIEC);
+       count2 = I915_READ(DDREC);
+       count3 = I915_READ(CSIEC);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       total_count = count1 + count2 + count3;
 
-       if (IS_CHERRYVIEW(dev)) {
-               cherryview_enable_rps(dev);
-       } else if (IS_VALLEYVIEW(dev)) {
-               valleyview_enable_rps(dev);
-       } else if (IS_BROADWELL(dev)) {
-               gen8_enable_rps(dev);
-               __gen6_update_ring_freq(dev);
+       /* FIXME: handle per-counter overflow */
+       if (total_count < dev_priv->ips.last_count1) {
+               diff = ~0UL - dev_priv->ips.last_count1;
+               diff += total_count;
        } else {
-               gen6_enable_rps(dev);
-               __gen6_update_ring_freq(dev);
+               diff = total_count - dev_priv->ips.last_count1;
        }
-       dev_priv->rps.enabled = true;
-       mutex_unlock(&dev_priv->rps.hw_lock);
 
-       intel_runtime_pm_put(dev_priv);
+       for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+               if (cparams[i].i == dev_priv->ips.c_m &&
+                   cparams[i].t == dev_priv->ips.r_t) {
+                       m = cparams[i].m;
+                       c = cparams[i].c;
+                       break;
+               }
+       }
+
+       diff = div_u64(diff, diff1);
+       ret = ((m * diff) + c);
+       ret = div_u64(ret, 10);
+
+       dev_priv->ips.last_count1 = total_count;
+       dev_priv->ips.last_time1 = now;
+
+       dev_priv->ips.chipset_power = ret;
+
+       return ret;
 }
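
/*
 * A minimal stand-alone sketch of the accounting above: the three
 * energy counters are summed, a wrap-safe delta is taken, and power is
 * the linear model (m * rate + c) / 10, with (m, c) picked from the
 * cparams[] row matching the memory configuration. Names here are
 * illustrative:
 */
#include <stdint.h>

static uint64_t counter_delta(uint64_t cur, uint64_t last)
{
        /* mirrors the wrap handling above (counters only count up) */
        return cur >= last ? cur - last : (~(uint64_t)0 - last) + cur;
}

static uint64_t chipset_power(uint64_t delta_counts, uint64_t delta_ms,
                              uint32_t m, uint32_t c)
{
        uint64_t rate = delta_counts / delta_ms; /* caller ensures >= 10 ms */

        return (m * rate + c) / 10;
}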
 
-void intel_enable_gt_powersave(struct drm_device *dev)
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_IRONLAKE_M(dev)) {
-               mutex_lock(&dev->struct_mutex);
-               ironlake_enable_drps(dev);
-               ironlake_enable_rc6(dev);
-               intel_init_emon(dev);
-               mutex_unlock(&dev->struct_mutex);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               /*
-                * PCU communication is slow and this doesn't need to be
-                * done at any specific time, so do this out of our fast path
-                * to make resume and init faster.
-                *
-                * We depend on the HW RC6 power context save/restore
-                * mechanism when entering D3 through runtime PM suspend. So
-                * disable RPM until RPS/RC6 is properly setup. We can only
-                * get here via the driver load/system resume/runtime resume
-                * paths, so the _noresume version is enough (and in case of
-                * runtime resume it's necessary).
-                */
-               if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
-                                          round_jiffies_up_relative(HZ)))
-                       intel_runtime_pm_get_noresume(dev_priv);
-       }
-}
-
-void intel_reset_gt_powersave(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       dev_priv->rps.enabled = false;
-       intel_enable_gt_powersave(dev);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void g4x_disable_trickle_feed(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
-
-       for_each_pipe(dev_priv, pipe) {
-               I915_WRITE(DSPCNTR(pipe),
-                          I915_READ(DSPCNTR(pipe)) |
-                          DISPPLANE_TRICKLE_FEED_DISABLE);
-               intel_flush_primary_plane(dev_priv, pipe);
-       }
-}
-
-static void ilk_init_lp_watermarks(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
-       I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
-       I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
-
-       /*
-        * Don't touch WM1S_LP_EN here.
-        * Doing so could cause underruns.
-        */
-}
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-       /*
-        * Required for FBC
-        * WaFbcDisableDpfcClockGating:ilk
-        */
-       dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
-                  ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
-                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
-
-       I915_WRITE(PCH_3DCGDIS0,
-                  MARIUNIT_CLOCK_GATE_DISABLE |
-                  SVSMUNIT_CLOCK_GATE_DISABLE);
-       I915_WRITE(PCH_3DCGDIS1,
-                  VFMUNIT_CLOCK_GATE_DISABLE);
-
-       /*
-        * According to the spec the following bits should be set in
-        * order to enable memory self-refresh
-        * The bit 22/21 of 0x42004
-        * The bit 5 of 0x42020
-        * The bit 15 of 0x45000
-        */
-       I915_WRITE(ILK_DISPLAY_CHICKEN2,
-                  (I915_READ(ILK_DISPLAY_CHICKEN2) |
-                   ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-       dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
-       I915_WRITE(DISP_ARB_CTL,
-                  (I915_READ(DISP_ARB_CTL) |
-                   DISP_FBC_WM_DIS));
-
-       ilk_init_lp_watermarks(dev);
-
-       /*
-        * According to the hardware documentation, the following bits
-        * should be set unconditionally in order to enable FBC.
-        * The bit 22 of 0x42000
-        * The bit 22 of 0x42004
-        * The bit 7,8,9 of 0x42020.
-        */
-       if (IS_IRONLAKE_M(dev)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:ilk */
-               I915_WRITE(ILK_DISPLAY_CHICKEN1,
-                          I915_READ(ILK_DISPLAY_CHICKEN1) |
-                          ILK_FBCQ_DIS);
-               I915_WRITE(ILK_DISPLAY_CHICKEN2,
-                          I915_READ(ILK_DISPLAY_CHICKEN2) |
-                          ILK_DPARB_GATE);
-       }
-
-       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
-
-       I915_WRITE(ILK_DISPLAY_CHICKEN2,
-                  I915_READ(ILK_DISPLAY_CHICKEN2) |
-                  ILK_ELPIN_409_SELECT);
-       I915_WRITE(_3D_CHICKEN2,
-                  _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
-                  _3D_CHICKEN2_WM_READ_PIPELINED);
-
-       /* WaDisableRenderCachePipelinedFlush:ilk */
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:ilk */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       g4x_disable_trickle_feed(dev);
-
-       ibx_init_clock_gating(dev);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
-       uint32_t val;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
-                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
-                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
-       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-                  DPLS_EDP_PPS_FIX_DIS);
-       /* The below fixes weird display corruption (a few pixels shifted
-        * downward) seen only on the LVDS of some HP laptops with IVY.
-        */
-       for_each_pipe(dev_priv, pipe) {
-               val = I915_READ(TRANS_CHICKEN2(pipe));
-               val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
-               val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-               if (dev_priv->vbt.fdi_rx_polarity_inverted)
-                       val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-               val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
-               val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
-               val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
-               I915_WRITE(TRANS_CHICKEN2(pipe), val);
-       }
-       /* WADP0ClockGatingDisable */
-       for_each_pipe(dev_priv, pipe) {
-               I915_WRITE(TRANS_CHICKEN1(pipe),
-                          TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
-       }
-}
-
-static void gen6_check_mch_setup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
-
-       tmp = I915_READ(MCH_SSKPD);
-       if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
-               DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
-                             tmp);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
-
-       I915_WRITE(ILK_DISPLAY_CHICKEN2,
-                  I915_READ(ILK_DISPLAY_CHICKEN2) |
-                  ILK_ELPIN_409_SELECT);
-
-       /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
-       I915_WRITE(_3D_CHICKEN,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
-
-       /* WaSetupGtModeTdRowDispatch:snb */
-       if (IS_SNB_GT1(dev))
-               I915_WRITE(GEN6_GT_MODE,
-                          _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:snb */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN6_GT_MODE,
-                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
-
-       ilk_init_lp_watermarks(dev);
-
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
-       I915_WRITE(GEN6_UCGCTL1,
-                  I915_READ(GEN6_UCGCTL1) |
-                  GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
-       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-        * gating disable must be set.  Failure to set it results in
-        * flickering pixels due to Z write ordering failures after
-        * some amount of runtime in the Mesa "fire" demo, and Unigine
-        * Sanctuary and Tropics, and apparently anything else with
-        * alpha test or pixel discard.
-        *
-        * According to the spec, bit 11 (RCCUNIT) must also be set,
-        * but we didn't debug actual testcases to find it out.
-        *
-        * WaDisableRCCUnitClockGating:snb
-        * WaDisableRCPBUnitClockGating:snb
-        */
-       I915_WRITE(GEN6_UCGCTL2,
-                  GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
-                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
-       /* WaStripsFansDisableFastClipPerformanceFix:snb */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+       struct drm_device *dev = dev_priv->dev;
+       unsigned long val;
 
-       /*
-        * Bspec says:
-        * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
-        * 3DSTATE_SF number of SF output attributes is more than 16."
-        */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
+       if (INTEL_INFO(dev)->gen != 5)
+               return 0;
 
-       /*
-        * According to the spec the following bits should be
-        * set in order to enable memory self-refresh and fbc:
-        * The bit21 and bit22 of 0x42000
-        * The bit21 and bit22 of 0x42004
-        * The bit5 and bit7 of 0x42020
-        * The bit14 of 0x70180
-        * The bit14 of 0x71180
-        *
-        * WaFbcAsynchFlipDisableFbcQueue:snb
-        */
-       I915_WRITE(ILK_DISPLAY_CHICKEN1,
-                  I915_READ(ILK_DISPLAY_CHICKEN1) |
-                  ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-       I915_WRITE(ILK_DISPLAY_CHICKEN2,
-                  I915_READ(ILK_DISPLAY_CHICKEN2) |
-                  ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-       I915_WRITE(ILK_DSPCLK_GATE_D,
-                  I915_READ(ILK_DSPCLK_GATE_D) |
-                  ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
-                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+       spin_lock_irq(&mchdev_lock);
 
-       g4x_disable_trickle_feed(dev);
+       val = __i915_chipset_val(dev_priv);
 
-       cpt_init_clock_gating(dev);
+       spin_unlock_irq(&mchdev_lock);
 
-       gen6_check_mch_setup(dev);
+       return val;
 }
 
-static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
 {
-       uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
-
-       /*
-        * WaVSThreadDispatchOverride:ivb,vlv
-        *
-        * This actually overrides the dispatch
-        * mode for all thread types.
-        */
-       reg &= ~GEN7_FF_SCHED_MASK;
-       reg |= GEN7_FF_TS_SCHED_HW;
-       reg |= GEN7_FF_VS_SCHED_HW;
-       reg |= GEN7_FF_DS_SCHED_HW;
+       unsigned long m, x, b;
+       u32 tsfs;
 
-       I915_WRITE(GEN7_FF_THREAD_MODE, reg);
-}
+       tsfs = I915_READ(TSFS);
 
-static void lpt_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+       x = I915_READ8(TR1);
 
-       /*
-        * TODO: this bit should only be enabled when really needed, then
-        * disabled when not needed anymore in order to save power.
-        */
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-               I915_WRITE(SOUTH_DSPCLK_GATE_D,
-                          I915_READ(SOUTH_DSPCLK_GATE_D) |
-                          PCH_LP_PARTITION_LEVEL_DISABLE);
+       b = tsfs & TSFS_INTR_MASK;
 
-       /* WADPOClockGatingDisable:hsw */
-       I915_WRITE(_TRANSA_CHICKEN1,
-                  I915_READ(_TRANSA_CHICKEN1) |
-                  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+       return ((m * x) / 127) - b;
 }
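
/*
 * Worked example for the linear read-out above, with illustrative
 * values: for a slope m = 100, a TR1 reading x = 80 and an intercept
 * b = 10, the result is (100 * 80) / 127 - 10 = 62 - 10 = 52. The
 * callers below treat this value as a temperature-like input to the
 * empirical correction factors.
 */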
 
-static void lpt_suspend_hw(struct drm_device *dev)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-               uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
-
-               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
-       }
+       struct drm_device *dev = dev_priv->dev;
+       static const struct v_table {
+               u16 vd; /* in .1 mil */
+               u16 vm; /* in .1 mil */
+       } v_table[] = {
+               { 0, 0, },
+               { 375, 0, },
+               { 500, 0, },
+               { 625, 0, },
+               { 750, 0, },
+               { 875, 0, },
+               { 1000, 0, },
+               { 1125, 0, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4250, 3125, },
+               { 4375, 3250, },
+               { 4500, 3375, },
+               { 4625, 3500, },
+               { 4750, 3625, },
+               { 4875, 3750, },
+               { 5000, 3875, },
+               { 5125, 4000, },
+               { 5250, 4125, },
+               { 5375, 4250, },
+               { 5500, 4375, },
+               { 5625, 4500, },
+               { 5750, 4625, },
+               { 5875, 4750, },
+               { 6000, 4875, },
+               { 6125, 5000, },
+               { 6250, 5125, },
+               { 6375, 5250, },
+               { 6500, 5375, },
+               { 6625, 5500, },
+               { 6750, 5625, },
+               { 6875, 5750, },
+               { 7000, 5875, },
+               { 7125, 6000, },
+               { 7250, 6125, },
+               { 7375, 6250, },
+               { 7500, 6375, },
+               { 7625, 6500, },
+               { 7750, 6625, },
+               { 7875, 6750, },
+               { 8000, 6875, },
+               { 8125, 7000, },
+               { 8250, 7125, },
+               { 8375, 7250, },
+               { 8500, 7375, },
+               { 8625, 7500, },
+               { 8750, 7625, },
+               { 8875, 7750, },
+               { 9000, 7875, },
+               { 9125, 8000, },
+               { 9250, 8125, },
+               { 9375, 8250, },
+               { 9500, 8375, },
+               { 9625, 8500, },
+               { 9750, 8625, },
+               { 9875, 8750, },
+               { 10000, 8875, },
+               { 10125, 9000, },
+               { 10250, 9125, },
+               { 10375, 9250, },
+               { 10500, 9375, },
+               { 10625, 9500, },
+               { 10750, 9625, },
+               { 10875, 9750, },
+               { 11000, 9875, },
+               { 11125, 10000, },
+               { 11250, 10125, },
+               { 11375, 10250, },
+               { 11500, 10375, },
+               { 11625, 10500, },
+               { 11750, 10625, },
+               { 11875, 10750, },
+               { 12000, 10875, },
+               { 12125, 11000, },
+               { 12250, 11125, },
+               { 12375, 11250, },
+               { 12500, 11375, },
+               { 12625, 11500, },
+               { 12750, 11625, },
+               { 12875, 11750, },
+               { 13000, 11875, },
+               { 13125, 12000, },
+               { 13250, 12125, },
+               { 13375, 12250, },
+               { 13500, 12375, },
+               { 13625, 12500, },
+               { 13750, 12625, },
+               { 13875, 12750, },
+               { 14000, 12875, },
+               { 14125, 13000, },
+               { 14250, 13125, },
+               { 14375, 13250, },
+               { 14500, 13375, },
+               { 14625, 13500, },
+               { 14750, 13625, },
+               { 14875, 13750, },
+               { 15000, 13875, },
+               { 15125, 14000, },
+               { 15250, 14125, },
+               { 15375, 14250, },
+               { 15500, 14375, },
+               { 15625, 14500, },
+               { 15750, 14625, },
+               { 15875, 14750, },
+               { 16000, 14875, },
+               { 16125, 15000, },
+       };
+       if (INTEL_INFO(dev)->is_mobile)
+               return v_table[pxvid].vm;
+       else
+               return v_table[pxvid].vd;
 }
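
/*
 * The table above maps the 7-bit PXVID code to a voltage, apparently in
 * tenths of a millivolt ("in .1 mil"), with separate desktop (vd) and
 * mobile (vm) columns. Example: pxvid = 32 selects { 4250, 3125 }, i.e.
 * 425.0 mV on desktop parts and 312.5 mV on mobile parts.
 */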
 
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
-
-       /* FIXME(BDW): Check all the w/a, some might only apply to
-        * pre-production hw. */
-
-
-       I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
+       u64 now, diff, diffms;
+       u32 count;
 
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
+       assert_spin_locked(&mchdev_lock);
 
+       now = ktime_get_raw_ns();
+       diffms = now - dev_priv->ips.last_time2;
+       do_div(diffms, NSEC_PER_MSEC);
 
-       /* WaSwitchSolVfFArbitrationPriority:bdw */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+       /* Don't divide by 0 */
+       if (!diffms)
+               return;
 
-       /* WaPsrDPAMaskVBlankInSRD:bdw */
-       I915_WRITE(CHICKEN_PAR1_1,
-                  I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+       count = I915_READ(GFXEC);
 
-       /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
-       for_each_pipe(dev_priv, pipe) {
-               I915_WRITE(CHICKEN_PIPESL_1(pipe),
-                          I915_READ(CHICKEN_PIPESL_1(pipe)) |
-                          BDW_DPRS_MASK_VBLANK_SRD);
+       if (count < dev_priv->ips.last_count2) {
+               diff = ~0UL - dev_priv->ips.last_count2;
+               diff += count;
+       } else {
+               diff = count - dev_priv->ips.last_count2;
        }
 
-       /* WaVSRefCountFullforceMissDisable:bdw */
-       /* WaDSRefCountFullforceMissDisable:bdw */
-       I915_WRITE(GEN7_FF_THREAD_MODE,
-                  I915_READ(GEN7_FF_THREAD_MODE) &
-                  ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
-
-       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
-
-       /* WaDisableSDEUnitClockGating:bdw */
-       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+       dev_priv->ips.last_count2 = count;
+       dev_priv->ips.last_time2 = now;
 
-       lpt_init_clock_gating(dev);
+       /* More magic constants... */
+       diff = diff * 1181;
+       diff = div_u64(diff, diffms * 10);
+       dev_priv->ips.gfx_power = diff;
 }
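
/*
 * Stand-alone sketch of the update above: a wrap-safe delta of the
 * GFXEC energy counter is scaled by the magic 1181 factor and divided
 * by the elapsed time. Names are illustrative:
 */
#include <stdint.h>

static uint64_t gfx_power(uint64_t delta_counts, uint64_t delta_ms)
{
        /* mirrors: diff = diff * 1181; diff = div_u64(diff, diffms * 10); */
        return (delta_counts * 1181) / (delta_ms * 10);
}

/* e.g. a delta of 5000 counts over 100 ms yields 5000 * 1181 / 1000 = 5905 */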
 
-static void haswell_init_clock_gating(struct drm_device *dev)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       ilk_init_lp_watermarks(dev);
-
-       /* L3 caching of data atomics doesn't work -- disable it. */
-       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
-       I915_WRITE(HSW_ROW_CHICKEN3,
-                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
-
-       /* This is required by WaCatErrorRejectionIssue:hsw */
-       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-       /* WaVSRefCountFullforceMissDisable:hsw */
-       I915_WRITE(GEN7_FF_THREAD_MODE,
-                  I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
-
-       /* WaDisable_RenderCache_OperationalFlush:hsw */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /* enable HiZ Raw Stall Optimization */
-       I915_WRITE(CACHE_MODE_0_GEN7,
-                  _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
-
-       /* WaDisable4x2SubspanOptimization:hsw */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+       struct drm_device *dev = dev_priv->dev;
 
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN7_GT_MODE,
-                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+       if (INTEL_INFO(dev)->gen != 5)
+               return;
 
-       /* WaSwitchSolVfFArbitrationPriority:hsw */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+       spin_lock_irq(&mchdev_lock);
 
-       /* WaRsPkgCStateDisplayPMReq:hsw */
-       I915_WRITE(CHICKEN_PAR1_1,
-                  I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+       __i915_update_gfx_val(dev_priv);
 
-       lpt_init_clock_gating(dev);
+       spin_unlock_irq(&mchdev_lock);
 }
 
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t snpcr;
-
-       ilk_init_lp_watermarks(dev);
-
-       I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
-
-       /* WaDisableEarlyCull:ivb */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
-
-       /* WaDisableBackToBackFlipFix:ivb */
-       I915_WRITE(IVB_CHICKEN3,
-                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
-       /* WaDisablePSDDualDispatchEnable:ivb */
-       if (IS_IVB_GT1(dev))
-               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:ivb */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
-       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
-                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
-       /* WaApplyL3ControlAndL3ChickenMode:ivb */
-       I915_WRITE(GEN7_L3CNTLREG1,
-                       GEN7_WA_FOR_GEN7_L3_CONTROL);
-       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-                  GEN7_WA_L3_CHICKEN_MODE);
-       if (IS_IVB_GT1(dev))
-               I915_WRITE(GEN7_ROW_CHICKEN2,
-                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-       else {
-               /* must write both registers */
-               I915_WRITE(GEN7_ROW_CHICKEN2,
-                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-               I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
-                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-       }
-
-       /* WaForceL3Serialization:ivb */
-       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
-                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+       unsigned long t, corr, state1, corr2, state2;
+       u32 pxvid, ext_v;
 
-       /*
-        * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-        * This implements the WaDisableRCZUnitClockGating:ivb workaround.
-        */
-       I915_WRITE(GEN6_UCGCTL2,
-                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+       assert_spin_locked(&mchdev_lock);
 
-       /* This is required by WaCatErrorRejectionIssue:ivb */
-       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
+       pxvid = (pxvid >> 24) & 0x7f;
+       ext_v = pvid_to_extvid(dev_priv, pxvid);
 
-       g4x_disable_trickle_feed(dev);
+       state1 = ext_v;
 
-       gen7_setup_fixed_func_scheduler(dev_priv);
+       t = i915_mch_val(dev_priv);
 
-       if (0) { /* causes HiZ corruption on ivb:gt1 */
-               /* enable HiZ Raw Stall Optimization */
-               I915_WRITE(CACHE_MODE_0_GEN7,
-                          _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
-       }
+       /* Revel in the empirically derived constants */
 
-       /* WaDisable4x2SubspanOptimization:ivb */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+       /* Correction factor in 1/100000 units */
+       if (t > 80)
+               corr = ((t * 2349) + 135940);
+       else if (t >= 50)
+               corr = ((t * 964) + 29317);
+       else /* < 50 */
+               corr = ((t * 301) + 1004);
 
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN7_GT_MODE,
-                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+       corr = corr * ((150142 * state1) / 10000 - 78642);
+       corr /= 100000;
+       corr2 = (corr * dev_priv->ips.corr);
 
-       snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-       snpcr &= ~GEN6_MBC_SNPCR_MASK;
-       snpcr |= GEN6_MBC_SNPCR_MED;
-       I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+       state2 = (corr2 * state1) / 10000;
+       state2 /= 100; /* convert to mW */
 
-       if (!HAS_PCH_NOP(dev))
-               cpt_init_clock_gating(dev);
+       __i915_update_gfx_val(dev_priv);
 
-       gen6_check_mch_setup(dev);
+       return dev_priv->ips.gfx_power + state2;
 }
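
/*
 * Worked example for the piecewise correction above: at t = 60 the
 * middle branch applies, so corr = 60 * 964 + 29317 = 87157 (in
 * 1/100000 units), which is then scaled by the voltage-derived state1
 * term and by the fused ips.corr factor before the final mW conversion.
 */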
 
-static void valleyview_init_clock_gating(struct drm_device *dev)
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
+       unsigned long val;
 
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+       if (INTEL_INFO(dev)->gen != 5)
+               return 0;
 
-       /* WaDisableEarlyCull:vlv */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+       spin_lock_irq(&mchdev_lock);
 
-       /* WaDisableBackToBackFlipFix:vlv */
-       I915_WRITE(IVB_CHICKEN3,
-                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
+       val = __i915_gfx_val(dev_priv);
 
-       /* WaPsdDispatchEnable:vlv */
-       /* WaDisablePSDDualDispatchEnable:vlv */
-       I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
-                                     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+       spin_unlock_irq(&mchdev_lock);
 
-       /* WaDisable_RenderCache_OperationalFlush:vlv */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+       return val;
+}
 
-       /* WaForceL3Serialization:vlv */
-       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
-                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+       struct drm_i915_private *dev_priv;
+       unsigned long chipset_val, graphics_val, ret = 0;
 
-       /* WaDisableDopClockGating:vlv */
-       I915_WRITE(GEN7_ROW_CHICKEN2,
-                  _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       spin_lock_irq(&mchdev_lock);
+       if (!i915_mch_dev)
+               goto out_unlock;
+       dev_priv = i915_mch_dev;
 
-       /* This is required by WaCatErrorRejectionIssue:vlv */
-       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-                  I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-                  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+       chipset_val = __i915_chipset_val(dev_priv);
+       graphics_val = __i915_gfx_val(dev_priv);
 
-       gen7_setup_fixed_func_scheduler(dev_priv);
+       ret = chipset_val + graphics_val;
 
-       /*
-        * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-        * This implements the WaDisableRCZUnitClockGating:vlv workaround.
-        */
-       I915_WRITE(GEN6_UCGCTL2,
-                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+out_unlock:
+       spin_unlock_irq(&mchdev_lock);
 
-       /* WaDisableL3Bank2xClockGate:vlv
-        * Disabling L3 clock gating: MMIO 940c[25] = 1
-        * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
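
/*
 * The EXPORT_SYMBOL_GPL() hooks in this block are consumed by the
 * separate intel_ips module. A hedged sketch of that consumer side,
 * mirroring the symbol_get()/symbol_put() dance this file itself uses
 * in ips_ping_for_i915_load() further below (declarations from
 * <linux/module.h> and the i915 headers assumed):
 */
static unsigned long ips_sample_i915(void)
{
        unsigned long (*read_mch)(void);
        unsigned long val = 0;

        read_mch = symbol_get(i915_read_mch_val);
        if (read_mch) {
                val = read_mch();       /* combined chipset + graphics value */
                symbol_put(i915_read_mch_val);
        }
        return val;
}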
 
-       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
 
-       /*
-        * BSpec says this must be set, even though
-        * WaDisable4x2SubspanOptimization isn't listed for VLV.
-        */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+       spin_lock_irq(&mchdev_lock);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
 
-       /*
-        * WaIncreaseL3CreditsForVLVB0:vlv
-        * This is the hardware default actually.
-        */
-       I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+       if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+               dev_priv->ips.max_delay--;
 
-       /*
-        * WaDisableVLVClockGating_VBIIssue:vlv
-        * Disable clock gating on the GCFG unit to prevent a delay
-        * in the reporting of vblank events.
-        */
-       I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+out_unlock:
+       spin_unlock_irq(&mchdev_lock);
+
+       return ret;
 }
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
 
-static void cherryview_init_clock_gating(struct drm_device *dev)
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
 
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+       spin_lock_irq(&mchdev_lock);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
 
-       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+       if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+               dev_priv->ips.max_delay++;
 
-       /* WaVSRefCountFullforceMissDisable:chv */
-       /* WaDSRefCountFullforceMissDisable:chv */
-       I915_WRITE(GEN7_FF_THREAD_MODE,
-                  I915_READ(GEN7_FF_THREAD_MODE) &
-                  ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+out_unlock:
+       spin_unlock_irq(&mchdev_lock);
 
-       /* WaDisableSemaphoreAndSyncFlipWait:chv */
-       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+       return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
 
-       /* WaDisableCSUnitClockGating:chv */
-       I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
-                  GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+       struct drm_i915_private *dev_priv;
+       struct intel_engine_cs *ring;
+       bool ret = false;
+       int i;
 
-       /* WaDisableSDEUnitClockGating:chv */
-       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+       spin_lock_irq(&mchdev_lock);
+       if (!i915_mch_dev)
+               goto out_unlock;
+       dev_priv = i915_mch_dev;
 
-       /* WaDisableGunitClockGating:chv (pre-production hw) */
-       I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
-                  GINT_DIS);
+       for_each_ring(ring, dev_priv, i)
+               ret |= !list_empty(&ring->request_list);
 
-       /* WaDisableFfDopClockGating:chv (pre-production hw) */
-       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+out_unlock:
+       spin_unlock_irq(&mchdev_lock);
 
-       /* WaDisableDopClockGating:chv (pre-production hw) */
-       I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
-                  GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
 
-static void g4x_init_clock_gating(struct drm_device *dev)
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dspclk_gate;
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
 
-       I915_WRITE(RENCLK_GATE_D1, 0);
-       I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
-                  GS_UNIT_CLOCK_GATE_DISABLE |
-                  CL_UNIT_CLOCK_GATE_DISABLE);
-       I915_WRITE(RAMCLK_GATE_D, 0);
-       dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
-               OVRUNIT_CLOCK_GATE_DISABLE |
-               OVCUNIT_CLOCK_GATE_DISABLE;
-       if (IS_GM45(dev))
-               dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-       I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+       spin_lock_irq(&mchdev_lock);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
 
-       /* WaDisableRenderCachePipelinedFlush */
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+       dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
-       /* WaDisable_RenderCache_OperationalFlush:g4x */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+       if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+               ret = false;
 
-       g4x_disable_trickle_feed(dev);
+out_unlock:
+       spin_unlock_irq(&mchdev_lock);
+
+       return ret;
 }
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
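
/*
 * Illustrative sketch only, not part of the patch: roughly how a
 * consumer such as intel_ips might drive the hooks exported above
 * from its thermal polling loop. The helper name and the headroom
 * test are hypothetical.
 */
static void ips_gpu_thermal_poll(bool have_thermal_headroom)
{
        if (!i915_gpu_busy())
                return;                 /* nothing to boost or throttle */

        if (have_thermal_headroom)
                i915_gpu_raise();       /* allow a higher GPU frequency */
        else
                i915_gpu_lower();       /* pull the ceiling back down */
}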
 
-static void crestline_init_clock_gating(struct drm_device *dev)
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
-       I915_WRITE(RENCLK_GATE_D2, 0);
-       I915_WRITE(DSPCLK_GATE_D, 0);
-       I915_WRITE(RAMCLK_GATE_D, 0);
-       I915_WRITE16(DEUC, 0);
-       I915_WRITE(MI_ARB_STATE,
-                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+       void (*link)(void);
 
-       /* WaDisable_RenderCache_OperationalFlush:gen4 */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+       link = symbol_get(ips_link_to_i915_driver);
+       if (link) {
+               link();
+               symbol_put(ips_link_to_i915_driver);
+       }
 }
 
-static void broadwater_init_clock_gating(struct drm_device *dev)
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /* We only register the i915 ips part with intel-ips once everything is
+        * set up, to avoid intel-ips sneaking in and reading bogus values. */
+       spin_lock_irq(&mchdev_lock);
+       i915_mch_dev = dev_priv;
+       spin_unlock_irq(&mchdev_lock);
 
-       I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
-                  I965_RCC_CLOCK_GATE_DISABLE |
-                  I965_RCPB_CLOCK_GATE_DISABLE |
-                  I965_ISC_CLOCK_GATE_DISABLE |
-                  I965_FBC_CLOCK_GATE_DISABLE);
-       I915_WRITE(RENCLK_GATE_D2, 0);
-       I915_WRITE(MI_ARB_STATE,
-                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+       ips_ping_for_i915_load();
+}
 
-       /* WaDisable_RenderCache_OperationalFlush:gen4 */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+void intel_gpu_ips_teardown(void)
+{
+       spin_lock_irq(&mchdev_lock);
+       i915_mch_dev = NULL;
+       spin_unlock_irq(&mchdev_lock);
 }
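
/*
 * Editor's note (not part of the patch): i915_mch_dev acts as a
 * lock-protected rendezvous pointer. Every exported hook above takes
 * mchdev_lock and re-checks the pointer before dereferencing it, so
 * setting it to NULL here is enough to cut intel_ips off safely even
 * if a hook races with teardown.
 */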
 
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void intel_init_emon(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 dstate = I915_READ(D_STATE);
+       u32 lcfuse;
+       u8 pxw[16];
+       int i;
 
-       dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-               DSTATE_DOT_CLOCK_GATING;
-       I915_WRITE(D_STATE, dstate);
+       /* Disable the counter while we program it */
+       I915_WRITE(ECR, 0);
+       POSTING_READ(ECR);
 
-       if (IS_PINEVIEW(dev))
-               I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+       /* Program energy weights for various events */
+       I915_WRITE(SDEW, 0x15040d00);
+       I915_WRITE(CSIEW0, 0x007f0000);
+       I915_WRITE(CSIEW1, 0x1e220004);
+       I915_WRITE(CSIEW2, 0x04000004);
 
-       /* IIR "flip pending" means done if this bit is set */
-       I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+       for (i = 0; i < 5; i++)
+               I915_WRITE(PEW + (i * 4), 0);
+       for (i = 0; i < 3; i++)
+               I915_WRITE(DEW + (i * 4), 0);
 
-       /* interrupts should cause a wake up from C3 */
-       I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+       /* Program P-state weights to account for frequency power adjustment */
+       for (i = 0; i < 16; i++) {
+               u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+               unsigned long freq = intel_pxfreq(pxvidfreq);
+               unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+                       PXVFREQ_PX_SHIFT;
+               unsigned long val;
 
-       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-       I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+               val = vid * vid;
+               val *= (freq / 1000);
+               val *= 255;
+               val /= (127*127*900);
+               if (val > 0xff)
+                       DRM_ERROR("bad pxval: %ld\n", val);
+               pxw[i] = val;
+       }
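        /*
         * Editor's note (not part of the patch): the divisor 127*127*900
         * suggests the weights are normalised against a reference point of
         * vid = 127 at 900 MHz (with freq apparently in kHz, so freq/1000
         * is MHz). At that point val = 127*127*900*255 / (127*127*900) = 255,
         * exactly the 8-bit ceiling, which is why anything larger trips the
         * "bad pxval" error above.
         */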
+       /* Render standby states get 0 weight */
+       pxw[14] = 0;
+       pxw[15] = 0;
 
-       I915_WRITE(MI_ARB_STATE,
-                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
+       for (i = 0; i < 4; i++) {
+               u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+                       (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+               I915_WRITE(PXW + (i * 4), val);
+       }
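        /*
         * Editor's note (not part of the patch): each PXW register packs
         * four of the 8-bit weights computed above, most significant byte
         * first, so pxw[0] occupies bits 31:24 of the first register and
         * pxw[3] bits 7:0.
         */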
 
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /* Adjust magic regs to magic values (more experimental results) */
+       I915_WRITE(OGW0, 0);
+       I915_WRITE(OGW1, 0);
+       I915_WRITE(EG0, 0x00007f00);
+       I915_WRITE(EG1, 0x0000000e);
+       I915_WRITE(EG2, 0x000e0000);
+       I915_WRITE(EG3, 0x68000300);
+       I915_WRITE(EG4, 0x42000000);
+       I915_WRITE(EG5, 0x00140031);
+       I915_WRITE(EG6, 0);
+       I915_WRITE(EG7, 0);
 
-       I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+       for (i = 0; i < 8; i++)
+               I915_WRITE(PXWL + (i * 4), 0);
 
-       /* interrupts should cause a wake up from C3 */
-       I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
-                  _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+       /* Enable PMON + select events */
+       I915_WRITE(ECR, 0x80000019);
 
-       I915_WRITE(MEM_MODE,
-                  _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+       lcfuse = I915_READ(LCFUSE02);
+
+       dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-static void i830_init_clock_gating(struct drm_device *dev)
+void intel_init_gt_powersave(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
-       I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+       if (IS_CHERRYVIEW(dev))
+               cherryview_init_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev))
+               valleyview_init_gt_powersave(dev);
+}
 
-       I915_WRITE(MEM_MODE,
-                  _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
-                  _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+void intel_cleanup_gt_powersave(struct drm_device *dev)
+{
+       if (IS_CHERRYVIEW(dev))
+               return;
+       else if (IS_VALLEYVIEW(dev))
+               valleyview_cleanup_gt_powersave(dev);
 }
 
-void intel_init_clock_gating(struct drm_device *dev)
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->display.init_clock_gating(dev);
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+       /*
+        * TODO: disable RPS interrupts on GEN9+ too once RPS support
+        * is added for it.
+        */
+       if (INTEL_INFO(dev)->gen < 9)
+               gen6_disable_rps_interrupts(dev);
+
+       /* Force GPU to min freq during suspend */
+       gen6_rps_idle(dev_priv);
 }
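
/*
 * Editor's note (not part of the patch): flush_delayed_work() is used
 * above rather than cancel_delayed_work_sync() so that a pending
 * intel_gen6_powersave_work() runs to completion instead of being
 * dropped; that work also releases the runtime-PM reference taken when
 * it was queued, so cancelling it outright would leak that reference.
 */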
 
-void intel_suspend_hw(struct drm_device *dev)
+void intel_disable_gt_powersave(struct drm_device *dev)
 {
-       if (HAS_PCH_LPT(dev))
-               lpt_suspend_hw(dev);
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
-       for (i = 0;                                                     \
-            i < (power_domains)->power_well_count &&                   \
-                ((power_well) = &(power_domains)->power_wells[i]);     \
-            i++)                                                       \
-               if ((power_well)->domains & (domain_mask))
+       if (IS_IRONLAKE_M(dev)) {
+               ironlake_disable_drps(dev);
+               ironlake_disable_rc6(dev);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               intel_suspend_gt_powersave(dev);
 
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
-       for (i = (power_domains)->power_well_count - 1;                  \
-            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
-            i--)                                                        \
-               if ((power_well)->domains & (domain_mask))
+               mutex_lock(&dev_priv->rps.hw_lock);
+               if (INTEL_INFO(dev)->gen >= 9)
+                       gen9_disable_rps(dev);
+               else if (IS_CHERRYVIEW(dev))
+                       cherryview_disable_rps(dev);
+               else if (IS_VALLEYVIEW(dev))
+                       valleyview_disable_rps(dev);
+               else
+                       gen6_disable_rps(dev);
 
-/**
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       return I915_READ(HSW_PWR_WELL_DRIVER) ==
-                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+               dev_priv->rps.enabled = false;
+               mutex_unlock(&dev_priv->rps.hw_lock);
+       }
 }
 
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
-                                         enum intel_display_power_domain domain)
+static void intel_gen6_powersave_work(struct work_struct *work)
 {
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       bool is_enabled;
-       int i;
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            rps.delayed_resume_work.work);
+       struct drm_device *dev = dev_priv->dev;
 
-       if (dev_priv->pm.suspended)
-               return false;
+       mutex_lock(&dev_priv->rps.hw_lock);
 
-       power_domains = &dev_priv->power_domains;
+       /*
+        * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
+        * added for it.
+        */
+       if (INTEL_INFO(dev)->gen < 9)
+               gen6_reset_rps_interrupts(dev);
 
-       is_enabled = true;
+       if (IS_CHERRYVIEW(dev)) {
+               cherryview_enable_rps(dev);
+       } else if (IS_VALLEYVIEW(dev)) {
+               valleyview_enable_rps(dev);
+       } else if (INTEL_INFO(dev)->gen >= 9) {
+               gen9_enable_rps(dev);
+       } else if (IS_BROADWELL(dev)) {
+               gen8_enable_rps(dev);
+               __gen6_update_ring_freq(dev);
+       } else {
+               gen6_enable_rps(dev);
+               __gen6_update_ring_freq(dev);
+       }
+       dev_priv->rps.enabled = true;
 
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               if (power_well->always_on)
-                       continue;
+       if (INTEL_INFO(dev)->gen < 9)
+               gen6_enable_rps_interrupts(dev);
 
-               if (!power_well->hw_enabled) {
-                       is_enabled = false;
-                       break;
-               }
-       }
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       intel_runtime_pm_put(dev_priv);
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       return is_enabled;
+       if (IS_IRONLAKE_M(dev)) {
+               mutex_lock(&dev->struct_mutex);
+               ironlake_enable_drps(dev);
+               ironlake_enable_rc6(dev);
+               intel_init_emon(dev);
+               mutex_unlock(&dev->struct_mutex);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               /*
+                * PCU communication is slow and this doesn't need to be
+                * done at any specific time, so do this out of our fast path
+                * to make resume and init faster.
+                *
+                * We depend on the HW RC6 power context save/restore
+                * mechanism when entering D3 through runtime PM suspend. So
+                * disable RPM until RPS/RC6 is properly setup. We can only
+                * get here via the driver load/system resume/runtime resume
+                * paths, so the _noresume version is enough (and in case of
+                * runtime resume it's necessary).
+                */
+               if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+                                          round_jiffies_up_relative(HZ)))
+                       intel_runtime_pm_get_noresume(dev_priv);
+       }
 }
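
/*
 * Editor's note (not part of the patch): the runtime-PM reference
 * taken here with intel_runtime_pm_get_noresume() when the delayed
 * work is queued is released by intel_runtime_pm_put() at the end of
 * intel_gen6_powersave_work(), so the device cannot runtime suspend
 * while RPS/RC6 bring-up is still pending.
 */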
 
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain)
+void intel_reset_gt_powersave(struct drm_device *dev)
 {
-       struct i915_power_domains *power_domains;
-       bool ret;
-
-       power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-       ret = intel_display_power_enabled_unlocked(dev_priv, domain);
-       mutex_unlock(&power_domains->lock);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       return ret;
+       dev_priv->rps.enabled = false;
+       intel_enable_gt_powersave(dev);
 }
 
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+static void ibx_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        /*
-        * After we re-enable the power well, if we touch VGA register 0x3d5
-        * we'll get unclaimed register interrupts. This stops after we write
-        * anything to the VGA MSR register. The vgacon module uses this
-        * register all the time, so if we unbind our driver and, as a
-        * consequence, bind vgacon, we'll get stuck in an infinite loop at
-        * console_unlock(). So here we touch the VGA MSR register, making
-        * sure vgacon can keep working normally without triggering interrupts
-        * and error messages.
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
         */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-       outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-
-       if (IS_BROADWELL(dev))
-               gen8_irq_power_well_post_enable(dev_priv);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
 }
 
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
+static void g4x_disable_trickle_feed(struct drm_device *dev)
 {
-       bool is_enabled, enable_requested;
-       uint32_t tmp;
-
-       tmp = I915_READ(HSW_PWR_WELL_DRIVER);
-       is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
-       enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
-       if (enable) {
-               if (!enable_requested)
-                       I915_WRITE(HSW_PWR_WELL_DRIVER,
-                                  HSW_PWR_WELL_ENABLE_REQUEST);
-
-               if (!is_enabled) {
-                       DRM_DEBUG_KMS("Enabling power well\n");
-                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                                     HSW_PWR_WELL_STATE_ENABLED), 20))
-                               DRM_ERROR("Timeout enabling power well\n");
-               }
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe;
 
-               hsw_power_well_post_enable(dev_priv);
-       } else {
-               if (enable_requested) {
-                       I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
-                       POSTING_READ(HSW_PWR_WELL_DRIVER);
-                       DRM_DEBUG_KMS("Requesting to disable the power well\n");
-               }
+       for_each_pipe(dev_priv, pipe) {
+               I915_WRITE(DSPCNTR(pipe),
+                          I915_READ(DSPCNTR(pipe)) |
+                          DISPPLANE_TRICKLE_FEED_DISABLE);
+               intel_flush_primary_plane(dev_priv, pipe);
        }
 }
 
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
+static void ilk_init_lp_watermarks(struct drm_device *dev)
 {
-       hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+       I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+       I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
 
        /*
-        * We're taking over the BIOS, so clear any requests made by it since
-        * the driver is in charge now.
+        * Don't touch WM1S_LP_EN here.
+        * Doing so could cause underruns.
         */
-       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
-               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       hsw_set_power_well(dev_priv, power_well, false);
 }
 
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
-                                            struct i915_power_well *power_well)
-{
-       return true;
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
+static void ironlake_init_clock_gating(struct drm_device *dev)
 {
-       enum punit_power_well power_well_id = power_well->data;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
-                        PUNIT_PWRGT_PWR_GATE(power_well_id);
+       /*
+        * Required for FBC
+        * WaFbcDisableDpfcClockGating:ilk
+        */
+       dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+                  ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       I915_WRITE(PCH_3DCGDIS0,
+                  MARIUNIT_CLOCK_GATE_DISABLE |
+                  SVSMUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(PCH_3DCGDIS1,
+                  VFMUNIT_CLOCK_GATE_DISABLE);
 
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+       /*
+        * According to the spec the following bits should be set in
+        * order to enable memory self-refresh
+        * The bit 22/21 of 0x42004
+        * The bit 5 of 0x42020
+        * The bit 15 of 0x45000
+        */
+       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                  (I915_READ(ILK_DISPLAY_CHICKEN2) |
+                   ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+       dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+       I915_WRITE(DISP_ARB_CTL,
+                  (I915_READ(DISP_ARB_CTL) |
+                   DISP_FBC_WM_DIS));
 
-       if (COND)
-               goto out;
+       ilk_init_lp_watermarks(dev);
 
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
-       ctrl &= ~mask;
-       ctrl |= state;
-       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+       /*
+        * Based on documentation from the hardware team, the following bits
+        * should be set unconditionally in order to enable FBC.
+        * The bit 22 of 0x42000
+        * The bit 22 of 0x42004
+        * The bit 7,8,9 of 0x42020.
+        */
+       if (IS_IRONLAKE_M(dev)) {
+               /* WaFbcAsynchFlipDisableFbcQueue:ilk */
+               I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                          I915_READ(ILK_DISPLAY_CHICKEN1) |
+                          ILK_FBCQ_DIS);
+               I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                          I915_READ(ILK_DISPLAY_CHICKEN2) |
+                          ILK_DPARB_GATE);
+       }
 
-       if (wait_for(COND, 100))
-               DRM_ERROR("timout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
-#undef COND
+       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                  I915_READ(ILK_DISPLAY_CHICKEN2) |
+                  ILK_ELPIN_409_SELECT);
+       I915_WRITE(_3D_CHICKEN2,
+                  _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+                  _3D_CHICKEN2_WM_READ_PIPELINED);
 
-out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
+       /* WaDisableRenderCachePipelinedFlush:ilk */
+       I915_WRITE(CACHE_MODE_0,
+                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
+       /* WaDisable_RenderCache_OperationalFlush:ilk */
+       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, true);
-}
+       g4x_disable_trickle_feed(dev);
 
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, false);
+       ibx_init_clock_gating(dev);
 }
 
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
+static void cpt_init_clock_gating(struct drm_device *dev)
 {
-       int power_well_id = power_well->data;
-       bool enabled = false;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
-
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
-       mutex_lock(&dev_priv->rps.hw_lock);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe;
+       uint32_t val;
 
-       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
         */
-       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
-               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
-       if (state == ctrl)
-               enabled = true;
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+                  DPLS_EDP_PPS_FIX_DIS);
+       /* The below fixes display corruption (a few pixels shifted
+        * downward) seen only on the LVDS panels of some HP Ivy Bridge laptops.
         */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
-       WARN_ON(ctrl != state);
+       for_each_pipe(dev_priv, pipe) {
+               val = I915_READ(TRANS_CHICKEN2(pipe));
+               val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+               val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+               if (dev_priv->vbt.fdi_rx_polarity_inverted)
+                       val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+               val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+               val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+               val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+               I915_WRITE(TRANS_CHICKEN2(pipe), val);
+       }
+       /* WADP0ClockGatingDisable */
+       for_each_pipe(dev_priv, pipe) {
+               I915_WRITE(TRANS_CHICKEN1(pipe),
+                          TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+       }
+}
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
 
-       return enabled;
+       tmp = I915_READ(MCH_SSKPD);
+       if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+               DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+                             tmp);
 }
 
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
+static void gen6_init_clock_gating(struct drm_device *dev)
 {
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-       vlv_set_power_well(dev_priv, power_well, true);
+       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_enable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                  I915_READ(ILK_DISPLAY_CHICKEN2) |
+                  ILK_ELPIN_409_SELECT);
 
-       /*
-        * During driver initialization/resume we can avoid restoring the
-        * part of the HW/SW state that will be inited anyway explicitly.
-        */
-       if (dev_priv->power_domains.initializing)
-               return;
+       /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+       I915_WRITE(_3D_CHICKEN,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
-       intel_hpd_init(dev_priv->dev);
+       /* WaDisable_RenderCache_OperationalFlush:snb */
+       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-       i915_redisable_vga_power_on(dev_priv->dev);
-}
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN6_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+       ilk_init_lp_watermarks(dev);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_disable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       I915_WRITE(CACHE_MODE_0,
+                  _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
-       vlv_set_power_well(dev_priv, power_well, false);
+       I915_WRITE(GEN6_UCGCTL1,
+                  I915_READ(GEN6_UCGCTL1) |
+                  GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+                  GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
-       vlv_power_sequencer_reset(dev_priv);
-}
+       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+        * gating disable must be set.  Failure to set it results in
+        * flickering pixels due to Z write ordering failures after
+        * some amount of runtime in the Mesa "fire" demo, and Unigine
+        * Sanctuary and Tropics, and apparently anything else with
+        * alpha test or pixel discard.
+        *
+        * According to the spec, bit 11 (RCCUNIT) must also be set,
+        * but we didn't debug actual testcases to find it out.
+        *
+        * WaDisableRCCUnitClockGating:snb
+        * WaDisableRCPBUnitClockGating:snb
+        */
+       I915_WRITE(GEN6_UCGCTL2,
+                  GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+       /* WaStripsFansDisableFastClipPerformanceFix:snb */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
 
        /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
+        * Bspec says:
+        * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+        * 3DSTATE_SF number of SF output attributes is more than 16."
         */
-       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
-       vlv_set_power_well(dev_priv, power_well, true);
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
 
        /*
-        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-        *   b. The other bits such as sfr settings / modesel may all
-        *      be set to 0.
+        * According to the spec the following bits should be
+        * set in order to enable memory self-refresh and fbc:
+        * The bit21 and bit22 of 0x42000
+        * The bit21 and bit22 of 0x42004
+        * The bit5 and bit7 of 0x42020
+        * The bit14 of 0x70180
+        * The bit14 of 0x71180
         *
-        * This should only be done on init and resume from S3 with
-        * both PLLs disabled, or we risk losing DPIO and PLL
-        * synchronization.
+        * WaFbcAsynchFlipDisableFbcQueue:snb
         */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       enum pipe pipe;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+       I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                  I915_READ(ILK_DISPLAY_CHICKEN1) |
+                  ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                  I915_READ(ILK_DISPLAY_CHICKEN2) |
+                  ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+       I915_WRITE(ILK_DSPCLK_GATE_D,
+                  I915_READ(ILK_DSPCLK_GATE_D) |
+                  ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-       for_each_pipe(dev_priv, pipe)
-               assert_pll_disabled(dev_priv, pipe);
+       g4x_disable_trickle_feed(dev);
 
-       /* Assert common reset */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+       cpt_init_clock_gating(dev);
 
-       vlv_set_power_well(dev_priv, power_well, false);
+       gen6_check_mch_setup(dev);
 }
 
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 {
-       enum dpio_phy phy;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+       uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
 
        /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
+        * WaVSThreadDispatchOverride:ivb,vlv
+        *
+        * This actually overrides the dispatch
+        * mode for all thread types.
         */
-       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               phy = DPIO_PHY0;
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV);
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-       } else {
-               phy = DPIO_PHY1;
-               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-       }
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       /* Poll for phypwrgood signal */
-       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
-               DRM_ERROR("Display PHY %d is not power up\n", phy);
+       reg &= ~GEN7_FF_SCHED_MASK;
+       reg |= GEN7_FF_TS_SCHED_HW;
+       reg |= GEN7_FF_VS_SCHED_HW;
+       reg |= GEN7_FF_DS_SCHED_HW;
 
-       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
-                  PHY_COM_LANE_RESET_DEASSERT(phy));
+       I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
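
/*
 * Editor's sketch (not part of the patch) of the read-modify-write
 * idiom used above for multi-bit register fields: clear the whole
 * field with its mask before OR-ing in the new encoding, otherwise
 * stale bits from the previous setting can survive. The helper and
 * its names are hypothetical.
 */
static u32 update_field(u32 reg, u32 field_mask, u32 new_bits)
{
        reg &= ~field_mask;     /* drop the old field contents */
        reg |= new_bits;        /* install the new encoding */
        return reg;
}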
 
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
+static void lpt_init_clock_gating(struct drm_device *dev)
 {
-       enum dpio_phy phy;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
-       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               phy = DPIO_PHY0;
-               assert_pll_disabled(dev_priv, PIPE_A);
-               assert_pll_disabled(dev_priv, PIPE_B);
-       } else {
-               phy = DPIO_PHY1;
-               assert_pll_disabled(dev_priv, PIPE_C);
-       }
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
-                  ~PHY_COM_LANE_RESET_DEASSERT(phy));
+       /*
+        * TODO: this bit should only be enabled when really needed, then
+        * disabled when not needed anymore in order to save power.
+        */
+       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+               I915_WRITE(SOUTH_DSPCLK_GATE_D,
+                          I915_READ(SOUTH_DSPCLK_GATE_D) |
+                          PCH_LP_PARTITION_LEVEL_DISABLE);
 
-       vlv_set_power_well(dev_priv, power_well, false);
+       /* WADPOClockGatingDisable:hsw */
+       I915_WRITE(_TRANSA_CHICKEN1,
+                  I915_READ(_TRANSA_CHICKEN1) |
+                  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
 }
 
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
+static void lpt_suspend_hw(struct drm_device *dev)
 {
-       enum pipe pipe = power_well->data;
-       bool enabled;
-       u32 state, ctrl;
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-
-       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
-       /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
-        */
-       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
-       enabled = state == DP_SSS_PWR_ON(pipe);
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
-        */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
-       WARN_ON(ctrl << 16 != state);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+               uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
-       return enabled;
+               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+       }
 }
 
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
-                                   struct i915_power_well *power_well,
-                                   bool enable)
+static void broadwell_init_clock_gating(struct drm_device *dev)
 {
-       enum pipe pipe = power_well->data;
-       u32 state;
-       u32 ctrl;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe;
 
-       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+       I915_WRITE(WM3_LP_ILK, 0);
+       I915_WRITE(WM2_LP_ILK, 0);
+       I915_WRITE(WM1_LP_ILK, 0);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       /* WaSwitchSolVfFArbitrationPriority:bdw */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+       /* WaPsrDPAMaskVBlankInSRD:bdw */
+       I915_WRITE(CHICKEN_PAR1_1,
+                  I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
-       if (COND)
-               goto out;
+       /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
+       for_each_pipe(dev_priv, pipe) {
+               I915_WRITE(CHICKEN_PIPESL_1(pipe),
+                          I915_READ(CHICKEN_PIPESL_1(pipe)) |
+                          BDW_DPRS_MASK_VBLANK_SRD);
+       }
 
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
-       ctrl &= ~DP_SSC_MASK(pipe);
-       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+       /* WaVSRefCountFullforceMissDisable:bdw */
+       /* WaDSRefCountFullforceMissDisable:bdw */
+       I915_WRITE(GEN7_FF_THREAD_MODE,
+                  I915_READ(GEN7_FF_THREAD_MODE) &
+                  ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
-       if (wait_for(COND, 100))
-               DRM_ERROR("timout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+                  _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
-#undef COND
+       /* WaDisableSDEUnitClockGating:bdw */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
-out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       lpt_init_clock_gating(dev);
 }
 
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
+static void haswell_init_clock_gating(struct drm_device *dev)
 {
-       chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
-                                      struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
+       ilk_init_lp_watermarks(dev);
 
-       chv_set_pipe_power_well(dev_priv, power_well, true);
-}
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+       I915_WRITE(HSW_ROW_CHICKEN3,
+                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
 
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
+       /* This is required by WaCatErrorRejectionIssue:hsw */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       chv_set_pipe_power_well(dev_priv, power_well, false);
-}
+       /* WaVSRefCountFullforceMissDisable:hsw */
+       I915_WRITE(GEN7_FF_THREAD_MODE,
+                  I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 
-static void check_power_well_state(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+       /* WaDisable_RenderCache_OperationalFlush:hsw */
+       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-       if (power_well->always_on || !i915.disable_power_well) {
-               if (!enabled)
-                       goto mismatch;
+       /* enable HiZ Raw Stall Optimization */
+       I915_WRITE(CACHE_MODE_0_GEN7,
+                  _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
 
-               return;
-       }
+       /* WaDisable4x2SubspanOptimization:hsw */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN7_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
-       if (enabled != (power_well->count > 0))
-               goto mismatch;
+       /* WaSwitchSolVfFArbitrationPriority:hsw */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
-       return;
+       /* WaRsPkgCStateDisplayPMReq:hsw */
+       I915_WRITE(CHICKEN_PAR1_1,
+                  I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
 
-mismatch:
-       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
-                 power_well->name, power_well->always_on, enabled,
-                 power_well->count, i915.disable_power_well);
+       lpt_init_clock_gating(dev);
 }
 
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
+static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       int i;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t snpcr;
 
-       intel_runtime_pm_get(dev_priv);
+       ilk_init_lp_watermarks(dev);
 
-       power_domains = &dev_priv->power_domains;
+       I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
-       mutex_lock(&power_domains->lock);
+       /* WaDisableEarlyCull:ivb */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
-       for_each_power_well(i, power_well, BIT(domain), power_domains) {
-               if (!power_well->count++) {
-                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-                       power_well->ops->enable(dev_priv, power_well);
-                       power_well->hw_enabled = true;
-               }
+       /* WaDisableBackToBackFlipFix:ivb */
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-               check_power_well_state(dev_priv, power_well);
-       }
+       /* WaDisablePSDDualDispatchEnable:ivb */
+       if (IS_IVB_GT1(dev))
+               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
-       power_domains->domain_use_count[domain]++;
+       /* WaDisable_RenderCache_OperationalFlush:ivb */
+       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-       mutex_unlock(&power_domains->lock);
-}
+       /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       int i;
+       /* WaApplyL3ControlAndL3ChickenMode:ivb */
+       I915_WRITE(GEN7_L3CNTLREG1,
+                       GEN7_WA_FOR_GEN7_L3_CONTROL);
+       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+                  GEN7_WA_L3_CHICKEN_MODE);
+       if (IS_IVB_GT1(dev))
+               I915_WRITE(GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       else {
+               /* must write both registers */
+               I915_WRITE(GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+               I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       }
 
-       power_domains = &dev_priv->power_domains;
+       /* WaForceL3Serialization:ivb */
+       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-       mutex_lock(&power_domains->lock);
+       /*
+        * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+        * This implements the WaDisableRCZUnitClockGating:ivb workaround.
+        */
+       I915_WRITE(GEN6_UCGCTL2,
+                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-       WARN_ON(!power_domains->domain_use_count[domain]);
-       power_domains->domain_use_count[domain]--;
+       /* This is required by WaCatErrorRejectionIssue:ivb */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               WARN_ON(!power_well->count);
+       g4x_disable_trickle_feed(dev);
 
-               if (!--power_well->count && i915.disable_power_well) {
-                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-                       power_well->hw_enabled = false;
-                       power_well->ops->disable(dev_priv, power_well);
-               }
+       gen7_setup_fixed_func_scheduler(dev_priv);
 
-               check_power_well_state(dev_priv, power_well);
+       if (0) { /* causes HiZ corruption on ivb:gt1 */
+               /* enable HiZ Raw Stall Optimization */
+               I915_WRITE(CACHE_MODE_0_GEN7,
+                          _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
        }
 
-       mutex_unlock(&power_domains->lock);
-
-       intel_runtime_pm_put(dev_priv);
-}
-
-static struct i915_power_domains *hsw_pwr;
-
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
-       struct drm_i915_private *dev_priv;
-
-       if (!hsw_pwr)
-               return -ENODEV;
+       /* WaDisable4x2SubspanOptimization:ivb */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       I915_WRITE(GEN7_GT_MODE,
+                  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
-       struct drm_i915_private *dev_priv;
+       snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+       snpcr &= ~GEN6_MBC_SNPCR_MASK;
+       snpcr |= GEN6_MBC_SNPCR_MED;
+       I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
-       if (!hsw_pwr)
-               return -ENODEV;
+       if (!HAS_PCH_NOP(dev))
+               cpt_init_clock_gating(dev);
 
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-       return 0;
+       gen6_check_mch_setup(dev);
 }
-EXPORT_SYMBOL_GPL(i915_release_power_well);
 
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
+static void valleyview_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv;
-
-       if (!hsw_pwr)
-               return -ENODEV;
-
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-
-       return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
-
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
-       BIT(POWER_DOMAIN_PIPE_A) |                      \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
-       BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_CRT) |                    \
-       BIT(POWER_DOMAIN_PLLS) |                        \
-       BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
-       BIT(POWER_DOMAIN_INIT))
-
-#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
-       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
-       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_CRT) |            \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_A_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_A) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_B) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_C) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
-       .sync_hw = i9xx_always_on_power_well_noop,
-       .enable = i9xx_always_on_power_well_noop,
-       .disable = i9xx_always_on_power_well_noop,
-       .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
-       .sync_hw = chv_pipe_power_well_sync_hw,
-       .enable = chv_pipe_power_well_enable,
-       .disable = chv_pipe_power_well_disable,
-       .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = chv_dpio_cmn_power_well_enable,
-       .disable = chv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well i9xx_always_on_power_well[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = hsw_power_well_enable,
-       .disable = hsw_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static struct i915_power_well hsw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = HSW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-       },
-};
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-static struct i915_power_well bdw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = BDW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-       },
-};
+       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_display_power_well_enable,
-       .disable = vlv_display_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
+       /* WaDisableEarlyCull:vlv */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_dpio_cmn_power_well_enable,
-       .disable = vlv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
+       /* WaDisableBackToBackFlipFix:vlv */
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_power_well_enable,
-       .disable = vlv_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
+       /* WaPsdDispatchEnable:vlv */
+       /* WaDisablePSDDualDispatchEnable:vlv */
+       I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+                                     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
-static struct i915_power_well vlv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DISP2D,
-               .ops = &vlv_display_power_well_ops,
-       },
-       {
-               .name = "dpio-tx-b-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-       },
-       {
-               .name = "dpio-tx-b-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-       },
-       {
-               .name = "dpio-tx-c-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-       },
-       {
-               .name = "dpio-tx-c-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-       },
-       {
-               .name = "dpio-common",
-               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &vlv_dpio_cmn_power_well_ops,
-       },
-};
+       /* WaDisable_RenderCache_OperationalFlush:vlv */
+       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-static struct i915_power_well chv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-#if 0
-       {
-               .name = "display",
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DISP2D,
-               .ops = &vlv_display_power_well_ops,
-       },
-       {
-               .name = "pipe-a",
-               .domains = CHV_PIPE_A_POWER_DOMAINS,
-               .data = PIPE_A,
-               .ops = &chv_pipe_power_well_ops,
-       },
-       {
-               .name = "pipe-b",
-               .domains = CHV_PIPE_B_POWER_DOMAINS,
-               .data = PIPE_B,
-               .ops = &chv_pipe_power_well_ops,
-       },
-       {
-               .name = "pipe-c",
-               .domains = CHV_PIPE_C_POWER_DOMAINS,
-               .data = PIPE_C,
-               .ops = &chv_pipe_power_well_ops,
-       },
-#endif
-       {
-               .name = "dpio-common-bc",
-               /*
-                * XXX: cmnreset for one PHY seems to disturb the other.
-                * As a workaround keep both powered on at the same
-                * time for now.
-                */
-               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &chv_dpio_cmn_power_well_ops,
-       },
-       {
-               .name = "dpio-common-d",
-               /*
-                * XXX: cmnreset for one PHY seems to disturb the other.
-                * As a workaround keep both powered on at the same
-                * time for now.
-                */
-               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_D,
-               .ops = &chv_dpio_cmn_power_well_ops,
-       },
-#if 0
-       {
-               .name = "dpio-tx-b-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-       },
-       {
-               .name = "dpio-tx-b-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-       },
-       {
-               .name = "dpio-tx-c-01",
-               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-       },
-       {
-               .name = "dpio-tx-c-23",
-               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-       },
-       {
-               .name = "dpio-tx-d-01",
-               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
-       },
-       {
-               .name = "dpio-tx-d-23",
-               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
-       },
-#endif
-};
+       /* WaForceL3Serialization:vlv */
+       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
-                                                enum punit_power_well power_well_id)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       int i;
+       /* WaDisableDopClockGating:vlv */
+       I915_WRITE(GEN7_ROW_CHICKEN2,
+                  _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               if (power_well->data == power_well_id)
-                       return power_well;
-       }
+       /* This is required by WaCatErrorRejectionIssue:vlv */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                  I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       return NULL;
-}
+       gen7_setup_fixed_func_scheduler(dev_priv);
 
-#define set_power_wells(power_domains, __power_wells) ({               \
-       (power_domains)->power_wells = (__power_wells);                 \
-       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
-})
+       /*
+        * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+        * This implements the WaDisableRCZUnitClockGating:vlv workaround.
+        */
+       I915_WRITE(GEN6_UCGCTL2,
+                  GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       /* WaDisableL3Bank2xClockGate:vlv
+        * Disabling L3 clock gating - MMIO 940c[25] = 1
+        * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
+       I915_WRITE(GEN7_UCGCTL4,
+                  I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
-       mutex_init(&power_domains->lock);
+       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
        /*
-        * The enabling order will be from lower to higher indexed wells,
-        * the disabling order is reversed.
+        * BSpec says this must be set, even though
+        * WaDisable4x2SubspanOptimization isn't listed for VLV.
         */
-       if (IS_HASWELL(dev_priv->dev)) {
-               set_power_wells(power_domains, hsw_power_wells);
-               hsw_pwr = power_domains;
-       } else if (IS_BROADWELL(dev_priv->dev)) {
-               set_power_wells(power_domains, bdw_power_wells);
-               hsw_pwr = power_domains;
-       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
-               set_power_wells(power_domains, chv_power_wells);
-       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
-               set_power_wells(power_domains, vlv_power_wells);
-       } else {
-               set_power_wells(power_domains, i9xx_always_on_power_well);
-       }
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-       return 0;
-}
+       /*
+        * WaIncreaseL3CreditsForVLVB0:vlv
+        * This is the hardware default actually.
+        */
+       I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
 
-void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-{
-       hsw_pwr = NULL;
+       /*
+        * WaDisableVLVClockGating_VBIIssue:vlv
+        * Disable clock gating on the GCFG unit to prevent a delay
+        * in the reporting of vblank events.
+        */
+       I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
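
Most of the chicken-register writes in these init_clock_gating functions go
through the driver's masked-bit helpers. A minimal sketch of that convention,
matching the definitions in i915_reg.h: the upper 16 bits of such registers
act as a per-bit write-enable mask for the lower 16, so a single MMIO store
replaces a read-modify-write cycle.

    /* Masked registers: bits 31:16 select which of bits 15:0 get written. */
    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))  /* set bit(s) a */
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)          /* clear bit(s) a */

    /* So the WaDisableEarlyCull write above is one store whose high half
     * unlocks exactly the _3D_CHICKEN_SF_DISABLE_OBJEND_CULL bit. */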
 
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+static void cherryview_init_clock_gating(struct drm_device *dev)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               power_well->ops->sync_hw(dev_priv, power_well);
-               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
-                                                                    power_well);
-       }
-       mutex_unlock(&power_domains->lock);
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *cmn =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
-       struct i915_power_well *disp2d =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
-       /* nothing to do if common lane is already off */
-       if (!cmn->ops->is_enabled(dev_priv, cmn))
-               return;
+       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
-       /* If the display might be already active skip this */
-       if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
-           I915_READ(DPIO_CTL) & DPIO_CMNRST)
-               return;
+       /* WaVSRefCountFullforceMissDisable:chv */
+       /* WaDSRefCountFullforceMissDisable:chv */
+       I915_WRITE(GEN7_FF_THREAD_MODE,
+                  I915_READ(GEN7_FF_THREAD_MODE) &
+                  ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
-       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+       /* WaDisableSemaphoreAndSyncFlipWait:chv */
+       I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+                  _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
-       /* cmnlane needs DPLL registers */
-       disp2d->ops->enable(dev_priv, disp2d);
+       /* WaDisableCSUnitClockGating:chv */
+       I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+                  GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
-       /*
-        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-        * Need to assert and de-assert PHY SB reset by gating the
-        * common lane power, then un-gating it.
-        * Simply ungating isn't enough to reset the PHY enough to get
-        * ports and lanes running.
-        */
-       cmn->ops->disable(dev_priv, cmn);
+       /* WaDisableSDEUnitClockGating:chv */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 }
 
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+static void g4x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t dspclk_gate;
+
+       I915_WRITE(RENCLK_GATE_D1, 0);
+       I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+                  GS_UNIT_CLOCK_GATE_DISABLE |
+                  CL_UNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(RAMCLK_GATE_D, 0);
+       dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+               OVRUNIT_CLOCK_GATE_DISABLE |
+               OVCUNIT_CLOCK_GATE_DISABLE;
+       if (IS_GM45(dev))
+               dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+       I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
 
-       power_domains->initializing = true;
+       /* WaDisableRenderCachePipelinedFlush */
+       I915_WRITE(CACHE_MODE_0,
+                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
-       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-               mutex_lock(&power_domains->lock);
-               vlv_cmnlane_wa(dev_priv);
-               mutex_unlock(&power_domains->lock);
-       }
+       /* WaDisable_RenderCache_OperationalFlush:g4x */
+       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-       /* For now, we need the power well to be always enabled. */
-       intel_display_set_init_power(dev_priv, true);
-       intel_power_domains_resume(dev_priv);
-       power_domains->initializing = false;
+       g4x_disable_trickle_feed(dev);
 }
 
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+static void crestline_init_clock_gating(struct drm_device *dev)
 {
-       intel_runtime_pm_get(dev_priv);
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-       intel_runtime_pm_put(dev_priv);
+       I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+       I915_WRITE(RENCLK_GATE_D2, 0);
+       I915_WRITE(DSPCLK_GATE_D, 0);
+       I915_WRITE(RAMCLK_GATE_D, 0);
+       I915_WRITE16(DEUC, 0);
+       I915_WRITE(MI_ARB_STATE,
+                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+       /* WaDisable_RenderCache_OperationalFlush:gen4 */
+       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void broadwater_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!HAS_RUNTIME_PM(dev))
-               return;
+       I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+                  I965_RCC_CLOCK_GATE_DISABLE |
+                  I965_RCPB_CLOCK_GATE_DISABLE |
+                  I965_ISC_CLOCK_GATE_DISABLE |
+                  I965_FBC_CLOCK_GATE_DISABLE);
+       I915_WRITE(RENCLK_GATE_D2, 0);
+       I915_WRITE(MI_ARB_STATE,
+                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 
-       pm_runtime_get_sync(device);
-       WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+       /* WaDisable_RenderCache_OperationalFlush:gen4 */
+       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+static void gen3_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 dstate = I915_READ(D_STATE);
 
-       if (!HAS_RUNTIME_PM(dev))
-               return;
+       dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+               DSTATE_DOT_CLOCK_GATING;
+       I915_WRITE(D_STATE, dstate);
+
+       if (IS_PINEVIEW(dev))
+               I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+       /* IIR "flip pending" means done if this bit is set */
+       I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+       /* interrupts should cause a wake up from C3 */
+       I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+
+       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+       I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 
-       WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
-       pm_runtime_get_noresume(device);
+       I915_WRITE(MI_ARB_STATE,
+                  _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void i85x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!HAS_RUNTIME_PM(dev))
-               return;
+       I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+
+       /* interrupts should cause a wake up from C3 */
+       I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+                  _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
 
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_put_autosuspend(device);
+       I915_WRITE(MEM_MODE,
+                  _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+static void i830_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       pm_runtime_set_active(device);
+       I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
-       /*
-        * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
-        * requirement.
-        */
-       if (!intel_enable_rc6(dev)) {
-               DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-               return;
-       }
+       I915_WRITE(MEM_MODE,
+                  _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
+                  _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+}
 
-       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_use_autosuspend(device);
+void intel_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       pm_runtime_put_autosuspend(device);
+       dev_priv->display.init_clock_gating(dev);
 }
 
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+void intel_suspend_hw(struct drm_device *dev)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
+       if (HAS_PCH_LPT(dev))
+               lpt_suspend_hw(dev);
+}
 
-       if (!HAS_RUNTIME_PM(dev))
+static void intel_init_fbc(struct drm_i915_private *dev_priv)
+{
+       if (!HAS_FBC(dev_priv)) {
+               dev_priv->fbc.enabled = false;
                return;
+       }
 
-       if (!intel_enable_rc6(dev))
-               return;
+       if (INTEL_INFO(dev_priv)->gen >= 7) {
+               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+               dev_priv->display.enable_fbc = gen7_enable_fbc;
+               dev_priv->display.disable_fbc = ironlake_disable_fbc;
+       } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+               dev_priv->display.enable_fbc = ironlake_enable_fbc;
+               dev_priv->display.disable_fbc = ironlake_disable_fbc;
+       } else if (IS_GM45(dev_priv)) {
+               dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+               dev_priv->display.enable_fbc = g4x_enable_fbc;
+               dev_priv->display.disable_fbc = g4x_disable_fbc;
+       } else {
+               dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+               dev_priv->display.enable_fbc = i8xx_enable_fbc;
+               dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+               /* This value was pulled out of someone's hat */
+               I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+       }
 
-       /* Make sure we're not suspended first. */
-       pm_runtime_get_sync(device);
-       pm_runtime_disable(device);
+       dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
 }
 
 /* Set up chip specific power management-related functions */
@@ -7203,28 +7077,7 @@ void intel_init_pm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (HAS_FBC(dev)) {
-               if (INTEL_INFO(dev)->gen >= 7) {
-                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-                       dev_priv->display.enable_fbc = gen7_enable_fbc;
-                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
-               } else if (INTEL_INFO(dev)->gen >= 5) {
-                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-                       dev_priv->display.enable_fbc = ironlake_enable_fbc;
-                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
-               } else if (IS_GM45(dev)) {
-                       dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-                       dev_priv->display.enable_fbc = g4x_enable_fbc;
-                       dev_priv->display.disable_fbc = g4x_disable_fbc;
-               } else {
-                       dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-                       dev_priv->display.enable_fbc = i8xx_enable_fbc;
-                       dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
-                       /* This value was pulled out of someone's hat */
-                       I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-               }
-       }
+       intel_init_fbc(dev_priv);
 
        /* For cxsr */
        if (IS_PINEVIEW(dev))
@@ -7233,7 +7086,13 @@ void intel_init_pm(struct drm_device *dev)
                i915_ironlake_get_mem_freq(dev);
 
        /* For FIFO watermark updates */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               skl_setup_wm_latency(dev);
+
+               dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+               dev_priv->display.update_wm = skl_update_wm;
+               dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
+       } else if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);
 
                if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7314,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
        }
 }
 
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -7324,6 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
        }
 
        I915_WRITE(GEN6_PCODE_DATA, *val);
+       I915_WRITE(GEN6_PCODE_DATA1, 0);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7338,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
        return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -7361,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
        return 0;
 }
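
Both pcode helpers use the same GEN6 mailbox handshake; only fragments of it
are visible in the hunks above, so schematically (a sketch of the flow the
code implements, not additional hardware documentation):

    /* 1. Bail out if GEN6_PCODE_MAILBOX still has GEN6_PCODE_READY set
     *    (a previous request is in flight).
     * 2. Write the argument to GEN6_PCODE_DATA (and, as of this patch,
     *    clear GEN6_PCODE_DATA1).
     * 3. Write GEN6_PCODE_READY | mbox to GEN6_PCODE_MAILBOX.
     * 4. Poll until the hardware clears GEN6_PCODE_READY; for reads,
     *    then fetch the reply from GEN6_PCODE_DATA.
     */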
 
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int vlv_gpu_freq_div(unsigned int czclk_freq)
 {
-       int div;
-
-       /* 4 x czclk */
-       switch (dev_priv->mem_freq) {
-       case 800:
-               div = 10;
-               break;
-       case 1066:
-               div = 12;
-               break;
-       case 1333:
-               div = 16;
-               break;
+       switch (czclk_freq) {
+       case 200:
+               return 10;
+       case 267:
+               return 12;
+       case 320:
+       case 333:
+               return 16;
+       case 400:
+               return 20;
        default:
                return -1;
        }
+}
+
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+       int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
 
-       return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+       div = vlv_gpu_freq_div(czclk_freq);
+       if (div < 0)
+               return div;
+
+       return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
 }
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       int mul;
+       int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
 
-       /* 4 x czclk */
-       switch (dev_priv->mem_freq) {
-       case 800:
-               mul = 10;
-               break;
-       case 1066:
-               mul = 12;
-               break;
-       case 1333:
-               mul = 16;
-               break;
-       default:
-               return -1;
-       }
+       mul = vlv_gpu_freq_div(czclk_freq);
+       if (mul < 0)
+               return mul;
 
-       return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+       return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
 }
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-       int div, freq;
-
-       switch (dev_priv->rps.cz_freq) {
-       case 200:
-               div = 5;
-               break;
-       case 267:
-               div = 6;
-               break;
-       case 320:
-       case 333:
-       case 400:
-               div = 8;
-               break;
-       default:
-               return -1;
-       }
+       int div, czclk_freq = dev_priv->rps.cz_freq;
 
-       freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+       /* Check the error return before halving: -1 / 2 truncates to 0
+        * in C and would slip past the test below. */
+       div = vlv_gpu_freq_div(czclk_freq);
+       if (div < 0)
+               return div;
+       div /= 2;
 
-       return freq;
+       return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
 }
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       int mul, opcode;
+       int mul, czclk_freq = dev_priv->rps.cz_freq;
 
-       switch (dev_priv->rps.cz_freq) {
-       case 200:
-               mul = 5;
-               break;
-       case 267:
-               mul = 6;
-               break;
-       case 320:
-       case 333:
-       case 400:
-               mul = 8;
-               break;
-       default:
-               return -1;
-       }
+       /* As above, check the error return before halving. */
+       mul = vlv_gpu_freq_div(czclk_freq);
+       if (mul < 0)
+               return mul;
+       mul /= 2;
 
        /* CHV needs even values */
-       opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
-
-       return opcode;
+       return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
 }
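
A worked round trip through the shared divider table, assuming a VLV part
with mem_freq = 1066 (so czclk rounds to 267 and the divider is 12); the
numbers are illustrative, not taken from any particular SKU:

    czclk_freq = DIV_ROUND_CLOSEST(1066, 4)              /* = 267 */
    div        = vlv_gpu_freq_div(267)                   /* = 12  */
    byt_gpu_freq(dev_priv, 0xc0)
        = DIV_ROUND_CLOSEST(267 * (0xc0 + 6 - 0xbd), 12)
        = DIV_ROUND_CLOSEST(267 * 9, 12) = 200           /* MHz */
    byt_freq_opcode(dev_priv, 200)
        = DIV_ROUND_CLOSEST(12 * 200, 267) + 0xbd - 6
        = 9 + 0xb7 = 0xc0                                /* back to the opcode */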
 
 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -7490,5 +7317,4 @@ void intel_pm_setup(struct drm_device *dev)
                          intel_gen6_powersave_work);
 
        dev_priv->pm.suspended = false;
-       dev_priv->pm._irqs_disabled = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644 (file)
index 0000000..716b8a9
--- /dev/null
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell the display controller supports Panel Self-Refresh on
+ * display panels which have a remote frame buffer (RFB) implemented
+ * according to the PSR spec in eDP 1.3. PSR allows the display to go to
+ * lower standby states when the system is idle but the display is on, as it
+ * eliminates display refresh requests to DDR memory completely as long as
+ * the frame buffer for that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, which is why the i915 PSR support uses
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
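
A hypothetical caller-side sketch of the integration described above; the
frontbuffer-tracking function names are placeholders, and only
intel_psr_invalidate() and intel_psr_flush() are real entry points from
this file:

    /* frontbuffer_render_start(dev, bits):   (placeholder name)
     *     intel_psr_invalidate(dev, bits);   PSR exits, bits marked busy
     *
     * frontbuffer_render_done(dev, bits):    (placeholder name)
     *     intel_psr_flush(dev, bits);        bits cleared; re-enable is
     *                                        scheduled from the work queue
     */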
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+       return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+bool intel_psr_is_enabled(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_PSR(dev))
+               return false;
+
+       return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+                                   struct edp_vsc_psr *vsc_psr)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+       u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+       uint32_t *data = (uint32_t *) vsc_psr;
+       unsigned int i;
+
+       /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+          the video DIP being updated before programming the video DIP data
+          buffer registers. */
+       I915_WRITE(ctl_reg, 0);
+       POSTING_READ(ctl_reg);
+
+       for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+               if (i < sizeof(struct edp_vsc_psr))
+                       I915_WRITE(data_reg + i, *data++);
+               else
+                       I915_WRITE(data_reg + i, 0);
+       }
+
+       I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+       POSTING_READ(ctl_reg);
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+       struct edp_vsc_psr psr_vsc;
+
+       /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+       memset(&psr_vsc, 0, sizeof(psr_vsc));
+       psr_vsc.sdp_header.HB0 = 0;
+       psr_vsc.sdp_header.HB1 = 0x7;
+       psr_vsc.sdp_header.HB2 = 0x2;
+       psr_vsc.sdp_header.HB3 = 0x8;
+       intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t aux_clock_divider;
+       int precharge = 0x3;
+       bool only_standby = false;
+       static const uint8_t aux_msg[] = {
+               [0] = DP_AUX_NATIVE_WRITE << 4,
+               [1] = DP_SET_POWER >> 8,
+               [2] = DP_SET_POWER & 0xff,
+               [3] = 1 - 1,
+               [4] = DP_SET_POWER_D0,
+       };
+       int i;
+
+       BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
+
+       /* Enable PSR in sink */
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+       else
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+       /* Setup AUX registers */
+       for (i = 0; i < sizeof(aux_msg); i += 4)
+               I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+                          intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+       I915_WRITE(EDP_PSR_AUX_CTL(dev),
+                  DP_AUX_CH_CTL_TIME_OUT_400us |
+                  (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
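
The aux_msg[] array above is a pre-built DP AUX native-write request laid
out per the AUX header format (command in the high nibble of byte 0, 20-bit
address across bytes 0-2, request length minus one in byte 3, then the
payload); decoded for DP_SET_POWER (DPCD address 0x600) it reads:

    /* [0] = DP_AUX_NATIVE_WRITE << 4    command | address[19:16] (0)
     * [1] = DP_SET_POWER >> 8           address[15:8]  = 0x06
     * [2] = DP_SET_POWER & 0xff         address[7:0]   = 0x00
     * [3] = 1 - 1                       request length minus one
     * [4] = DP_SET_POWER_D0             one payload byte: wake the sink
     */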
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t max_sleep_time = 0x1f;
+       uint32_t idle_frames = 1;
+       uint32_t val = 0x0;
+       const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+       bool only_standby = false;
+
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
+
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+               val |= EDP_PSR_LINK_STANDBY;
+               val |= EDP_PSR_TP2_TP3_TIME_0us;
+               val |= EDP_PSR_TP1_TIME_0us;
+               val |= EDP_PSR_SKIP_AUX_EXIT;
+               val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
+       } else {
+               val |= EDP_PSR_LINK_DISABLE;
+       }
+
+       I915_WRITE(EDP_PSR_CTL(dev), val |
+                  (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+                  max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+                  idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+                  EDP_PSR_ENABLE);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dig_port->base.base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       lockdep_assert_held(&dev_priv->psr.lock);
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+       dev_priv->psr.source_ok = false;
+
+       if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+               DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+               return false;
+       }
+
+       if (!i915.enable_psr) {
+               DRM_DEBUG_KMS("PSR disable by flag\n");
+               return false;
+       }
+
+       /* The limitations below don't apply to Broadwell */
+       if (IS_BROADWELL(dev))
+               goto out;
+
+       if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+           S3D_ENABLE) {
+               DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+               return false;
+       }
+
+       if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+               DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+               return false;
+       }
+
+ out:
+       dev_priv->psr.source_ok = true;
+       return true;
+}
+
+static void intel_psr_do_enable(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+       WARN_ON(dev_priv->psr.active);
+       lockdep_assert_held(&dev_priv->psr.lock);
+
+       /* Enable/Re-enable PSR on the host */
+       intel_psr_enable_source(intel_dp);
+
+       dev_priv->psr.active = true;
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_PSR(dev)) {
+               DRM_DEBUG_KMS("PSR not supported on this platform\n");
+               return;
+       }
+
+       if (!is_edp_psr(intel_dp)) {
+               DRM_DEBUG_KMS("PSR not supported by this panel\n");
+               return;
+       }
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (dev_priv->psr.enabled) {
+               DRM_DEBUG_KMS("PSR already in use\n");
+               goto unlock;
+       }
+
+       if (!intel_psr_match_conditions(intel_dp))
+               goto unlock;
+
+       dev_priv->psr.busy_frontbuffer_bits = 0;
+
+       intel_psr_setup_vsc(intel_dp);
+
+       /* Avoid continuous PSR exit by masking memup and hpd */
+       I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+       /* Enable PSR on the panel */
+       intel_psr_enable_sink(intel_dp);
+
+       dev_priv->psr.enabled = intel_dp;
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling the pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       if (dev_priv->psr.active) {
+               I915_WRITE(EDP_PSR_CTL(dev),
+                          I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+               /* Wait till PSR is idle */
+               if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+                              EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+                       DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+               dev_priv->psr.active = false;
+       } else {
+               WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+       }
+
+       dev_priv->psr.enabled = NULL;
+       mutex_unlock(&dev_priv->psr.lock);
+
+       cancel_delayed_work_sync(&dev_priv->psr.work);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), psr.work.work);
+       struct intel_dp *intel_dp = dev_priv->psr.enabled;
+
+       /* We have to make sure PSR is ready for re-enable,
+        * otherwise it stays disabled until the next full enable/disable
+        * cycle. PSR might take some time to get fully disabled
+        * and be ready for re-enable.
+        */
+       if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+                     EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+               return;
+       }
+
+       mutex_lock(&dev_priv->psr.lock);
+       intel_dp = dev_priv->psr.enabled;
+
+       if (!intel_dp)
+               goto unlock;
+
+       /*
+        * The delayed work can race with an invalidate hence we need to
+        * recheck. Since psr_flush first clears this and then reschedules we
+        * won't ever miss a flush when bailing out here.
+        */
+       if (dev_priv->psr.busy_frontbuffer_bits)
+               goto unlock;
+
+       intel_psr_do_enable(intel_dp);
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->psr.active) {
+               u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+
+               I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+               dev_priv->psr.active = false;
+       }
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_device *dev,
+                             unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       pipe = to_intel_crtc(crtc)->pipe;
+
+       intel_psr_exit(dev);
+
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_device *dev,
+                        unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       pipe = to_intel_crtc(crtc)->pipe;
+       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+       /*
+        * On Haswell sprite plane updates don't result in a PSR invalidating
+        * signal in the hardware, which means we need to manually fake this in
+        * software for all flushes, not just when we've seen a preceding
+        * invalidation through frontbuffer rendering.
+        */
+       if (IS_HASWELL(dev) &&
+           (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+               intel_psr_exit(dev);
+
+       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+               schedule_delayed_work(&dev_priv->psr.work,
+                                     msecs_to_jiffies(100));
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev: DRM device
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR state.
+ */
+void intel_psr_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+       mutex_init(&dev_priv->psr.lock);
+}
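
Taken together, the kernel-doc comments above imply the following call
ordering for the entry points in this file; a lifecycle sketch, not code
from the patch:

    /* intel_psr_init(dev);           driver load: init work + mutex
     * ...modeset and link training...
     * intel_psr_enable(intel_dp);    only after the pipe is fully trained
     * ...runtime: invalidate/flush driven by frontbuffer tracking...
     * intel_psr_disable(intel_dp);   before the pipe is disabled
     */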
index 6c792d3a9c9caa55a6959868190223b396fd173e..5bd69852752c663ae22ccf4aedd1616518962225 100644 (file)
@@ -29,6 +29,7 @@
 extern const struct intel_renderstate_rodata gen6_null_state;
 extern const struct intel_renderstate_rodata gen7_null_state;
 extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
 
 #define RO_RENDERSTATE(_g)                                             \
        const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
index 75ef1b5de45c12c33337b00db36b30d199d9eb20..78011d73fa9f5afd87e48da808c3d726416f7871 100644 (file)
 #include "intel_renderstate.h"
 
 static const u32 gen8_null_state_relocs[] = {
-       0x00000048,
-       0x00000050,
-       0x00000060,
-       0x000003ec,
+       0x00000798,
+       0x000007a4,
+       0x000007ac,
+       0x000007bc,
        -1,
 };
 
 static const u32 gen8_null_state_batch[] = {
+       0x7a000004,
+       0x01000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x69040000,
-       0x61020001,
+       0x78140000,
+       0x04000000,
+       0x7820000a,
+       0x00000000,
+       0x00000000,
+       0x80000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78130002,
+       0x00000000,
+       0x00000000,
+       0x02001808,
+       0x781f0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78510009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78100007,
+       0x00000000,
+       0x00000000,
+       0x00010000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781b0007,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000800,
+       0x00000000,
+       0x78110008,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781e0003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781d0007,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78120002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78500003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781c0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x780c0000,
+       0x00000000,
+       0x78520003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78300000,
+       0x08010040,
+       0x78310000,
+       0x1e000000,
+       0x78320000,
+       0x1e000000,
+       0x78330000,
+       0x1e000000,
+       0x79190002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x791a0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x791b0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79120000,
+       0x00000000,
+       0x79130000,
+       0x00000000,
+       0x79140000,
+       0x00000000,
+       0x79150000,
+       0x00000000,
+       0x79160000,
+       0x00000000,
+       0x78150009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78190009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781a0009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78160009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78170009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78490001,
+       0x00000000,
+       0x00000000,
+       0x784a0000,
+       0x00000000,
+       0x784b0000,
+       0x00000004,
+       0x79170101,
+       0x00000000,
+       0x00000080,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
-       0x79120000,
        0x00000000,
-       0x79130000,
        0x00000000,
-       0x79140000,
        0x00000000,
-       0x79150000,
        0x00000000,
-       0x79160000,
        0x00000000,
-       0x6101000e,
-       0x00000001,
        0x00000000,
-       0x00000001,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x20000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x40000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x60000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x6101000e,
        0x00000001,      /* reloc */
        0x00000000,
+       0x00000000,
        0x00000001,      /* reloc */
        0x00000000,
+       0x00000001,      /* reloc */
        0x00000000,
+       0x00000001,
        0x00000000,
        0x00000001,      /* reloc */
        0x00000000,
-       0xfffff001,
        0x00001001,
-       0xfffff001,
        0x00001001,
-       0x78230000,
-       0x000006e0,
-       0x78210000,
-       0x00000700,
-       0x78300000,
-       0x08010040,
-       0x78330000,
-       0x08000000,
-       0x78310000,
-       0x08000000,
-       0x78320000,
-       0x08000000,
-       0x78240000,
-       0x00000641,
-       0x780e0000,
-       0x00000601,
+       0x00000001,
+       0x00001001,
+       0x61020001,
+       0x00000000,
+       0x00000000,
+       0x79000002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78050006,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x40000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x80000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0xc0000000,
+       0x00000000,
+       0x00000000,
+       0x79080001,
+       0x00000000,
+       0x00000000,
+       0x790a0001,
+       0x00000000,
+       0x00000000,
+       0x78060003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78070003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78040001,
+       0x00000000,
+       0x00000000,
+       0x79110000,
+       0x00000000,
        0x780d0000,
        0x00000000,
-       0x78180000,
-       0x00000001,
-       0x78520003,
+       0x79060000,
        0x00000000,
+       0x7907001f,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78190009,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -75,7 +580,6 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x781b0007,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -84,26 +588,22 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78270000,
        0x00000000,
-       0x782c0000,
        0x00000000,
-       0x781c0002,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78160009,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x7902000f,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78110008,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -113,12 +613,10 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78290000,
        0x00000000,
-       0x782e0000,
        0x00000000,
-       0x781a0009,
        0x00000000,
+       0x790c000f,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -128,7 +626,6 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x781d0007,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -136,153 +633,153 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x780a0003,
        0x00000000,
-       0x78280000,
        0x00000000,
-       0x782d0000,
        0x00000000,
-       0x78260000,
        0x00000000,
-       0x782b0000,
+       0x78080083,
+       0x00004000,
        0x00000000,
-       0x78150009,
        0x00000000,
        0x00000000,
+       0x04004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x08004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x0c004000,
        0x00000000,
        0x00000000,
-       0x78100007,
        0x00000000,
+       0x10004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x14004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x18004000,
        0x00000000,
-       0x781e0003,
        0x00000000,
        0x00000000,
+       0x1c004000,
        0x00000000,
        0x00000000,
-       0x78120002,
        0x00000000,
+       0x20004000,
        0x00000000,
        0x00000000,
-       0x781f0002,
-       0x30400820,
        0x00000000,
+       0x24004000,
        0x00000000,
-       0x78510009,
        0x00000000,
        0x00000000,
+       0x28004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x2c004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x30004000,
        0x00000000,
        0x00000000,
-       0x78500003,
-       0x00210000,
        0x00000000,
+       0x34004000,
        0x00000000,
        0x00000000,
-       0x78130002,
        0x00000000,
+       0x38004000,
        0x00000000,
        0x00000000,
-       0x782a0000,
-       0x00000480,
-       0x782f0000,
-       0x00000540,
-       0x78140000,
-       0x00000800,
-       0x78170009,
        0x00000000,
+       0x3c004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x40004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x44004000,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x7820000a,
-       0x00000580,
+       0x48004000,
        0x00000000,
-       0x08080000,
        0x00000000,
        0x00000000,
-       0x1f000002,
-       0x00060000,
+       0x4c004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x50004000,
        0x00000000,
-       0x784d0000,
-       0x40000000,
-       0x784f0000,
-       0x80000100,
-       0x780f0000,
-       0x00000740,
-       0x78050006,
        0x00000000,
        0x00000000,
+       0x54004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x58004000,
        0x00000000,
        0x00000000,
-       0x78070003,
        0x00000000,
+       0x5c004000,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x78060003,
+       0x60004000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x64004000,
        0x00000000,
-       0x78040001,
        0x00000000,
-       0x00000001,
-       0x79000002,
-       0xffffffff,
+       0x00000000,
+       0x68004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x6c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x70004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x74004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78004000,
+       0x00000000,
        0x00000000,
        0x00000000,
-       0x78080003,
-       0x00006000,
-       0x000005e0,      /* reloc */
+       0x7c004000,
        0x00000000,
        0x00000000,
-       0x78090005,
+       0x00000000,
+       0x80004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78090043,
        0x02000000,
        0x22220000,
-       0x02f60000,
-       0x11230000,
-       0x02850004,
-       0x11230000,
-       0x784b0000,
-       0x0000000f,
-       0x78490001,
        0x00000000,
        0x00000000,
-       0x7b000005,
        0x00000000,
-       0x00000003,
        0x00000000,
-       0x00000001,
        0x00000000,
        0x00000000,
-       0x05000000,      /* cmds end */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -297,8 +794,6 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x000004c0,      /* state start */
-       0x00000500,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -345,46 +840,65 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x680b0001,
+       0x78260000,
+       0x00000000,
+       0x78270000,
+       0x00000000,
+       0x78280000,
+       0x00000000,
+       0x78290000,
+       0x00000000,
+       0x782a0000,
+       0x00000000,
+       0x780e0000,
+       0x00000dc1,
+       0x78240000,
+       0x00000e01,
+       0x784f0000,
+       0x80000100,
+       0x784d0000,
+       0x40000000,
+       0x782b0000,
+       0x00000000,
+       0x782c0000,
+       0x00000000,
+       0x782d0000,
+       0x00000000,
+       0x782e0000,
+       0x00000000,
+       0x782f0000,
+       0x00000000,
+       0x780f0000,
        0x00000000,
+       0x78230000,
+       0x00000e60,
+       0x78210000,
+       0x00000e80,
+       0x7b000005,
+       0x00000004,
+       0x00000001,
        0x00000000,
+       0x00000001,
        0x00000000,
-       0x00000092,
        0x00000000,
+       0x05000000,      /* cmds end */
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
+       0x00000000,      /* state start */
        0x00000000,
+       0x3f800000,
+       0x3f800000,
+       0x3f800000,
+       0x3f800000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x0060005a,
-       0x21403ae8,
-       0x3a0000c0,
-       0x008d0040,
-       0x0060005a,
-       0x21603ae8,
-       0x3a0000c0,
-       0x008d0080,
-       0x0060005a,
-       0x21803ae8,
-       0x3a0000d0,
-       0x008d0040,
-       0x0060005a,
-       0x21a03ae8,
-       0x3a0000d0,
-       0x008d0080,
-       0x02800031,
-       0x2e0022e8,
-       0x0e000140,
-       0x08840001,
-       0x05800031,
-       0x200022e0,
-       0x0e000e00,
-       0x90031000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -410,38 +924,6 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
-       0x06200000,
-       0x00000002,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -449,8 +931,6 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0xf99a130c,
-       0x799a130c,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -466,9 +946,7 @@ static const u32 gen8_null_state_batch[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x3f800000,
        0x00000000,
-       0x3f800000,
        0x00000000,
        0x00000000,
        0x00000000,
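The regenerated gen8 table above follows the convention shared by all of these generated null-state batches: dwords tagged /* reloc */ are patched with the batch's GGTT address at load time, /* cmds end */ is the MI_BATCH_BUFFER_END opcode (0x05000000), and /* state start */ marks where the indirect state data begins after the command stream. Below is a sketch of how a loader might walk such a table; the helper name and the 32-bit-only fixup are illustrative assumptions, not the driver's exact code:

	/* Apply the tagged relocations to a CPU copy of the batch. Offsets
	 * are taken to be byte offsets into the batch, with the relocs
	 * array terminated by -1, as the arrays in these files suggest. */
	static void apply_null_state_relocs(u32 *batch, unsigned int batch_items,
					    const u32 *relocs, u64 ggtt_offset)
	{
		unsigned int i, r = 0;

		for (i = 0; i < batch_items; i++) {
			if (relocs[r] != (u32)-1 && i * 4 == relocs[r]) {
				batch[i] += lower_32_bits(ggtt_offset);
				r++;
			}
		}
	}
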
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644 (file)
index 0000000..8750753
--- /dev/null
@@ -0,0 +1,974 @@
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+       0x000007a8,
+       0x000007b4,
+       0x000007bc,
+       0x000007cc,
+       -1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+       0x7a000004,
+       0x01000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x69040300,
+       0x78140000,
+       0x04000000,
+       0x7820000a,
+       0x00000000,
+       0x00000000,
+       0x80000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78130002,
+       0x00000000,
+       0x00000000,
+       0x02001808,
+       0x781f0004,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78510009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78100007,
+       0x00000000,
+       0x00000000,
+       0x00010000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781b0007,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000800,
+       0x00000000,
+       0x78110008,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781e0003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781d0009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78120002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78500003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781c0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x780c0000,
+       0x00000000,
+       0x78520003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78300000,
+       0x08010040,
+       0x78310000,
+       0x1e000000,
+       0x78320000,
+       0x1e000000,
+       0x78330000,
+       0x1e000000,
+       0x79190002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x791a0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x791b0002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79120000,
+       0x00000000,
+       0x79130000,
+       0x00000000,
+       0x79140000,
+       0x00000000,
+       0x79150000,
+       0x00000000,
+       0x79160000,
+       0x00000000,
+       0x78150009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78190009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x781a0009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78160009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78170009,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78490001,
+       0x00000000,
+       0x00000000,
+       0x784a0000,
+       0x00000000,
+       0x784b0000,
+       0x00000004,
+       0x79170101,
+       0x00000000,
+       0x00000080,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x20000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x40000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79180006,
+       0x60000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x61010011,
+       0x00000001,      /* reloc */
+       0x00000000,
+       0x00000000,
+       0x00000001,      /* reloc */
+       0x00000000,
+       0x00000001,      /* reloc */
+       0x00000000,
+       0x00000001,
+       0x00000000,
+       0x00000001,      /* reloc */
+       0x00000000,
+       0x00001001,
+       0x00001001,
+       0x00000001,
+       0x00001001,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x61020001,
+       0x00000000,
+       0x00000000,
+       0x79000002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78050006,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x40000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0x80000000,
+       0x00000000,
+       0x00000000,
+       0x79040002,
+       0xc0000000,
+       0x00000000,
+       0x00000000,
+       0x79080001,
+       0x00000000,
+       0x00000000,
+       0x790a0001,
+       0x00000000,
+       0x00000000,
+       0x78060003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78070003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78040001,
+       0x00000000,
+       0x00000000,
+       0x79110000,
+       0x00000000,
+       0x780d0000,
+       0x00000000,
+       0x79060000,
+       0x00000000,
+       0x7907001f,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x7902000f,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x790c000f,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x780a0003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78080083,
+       0x00004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x04004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x08004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x0c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x10004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x14004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x18004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x1c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x20004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x24004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x28004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x2c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x30004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x34004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x38004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x3c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x40004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x44004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x48004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x4c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x50004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x54004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x58004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x5c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x60004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x64004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x68004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x6c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x70004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x74004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x7c004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x80004000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78090043,
+       0x02000000,
+       0x22220000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x78550003,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x680b0001,
+       0x780e0000,
+       0x00000e01,
+       0x78240000,
+       0x00000e41,
+       0x784f0000,
+       0x80000100,
+       0x784d0000,
+       0x40000000,
+       0x782b0000,
+       0x00000000,
+       0x782c0000,
+       0x00000000,
+       0x782d0000,
+       0x00000000,
+       0x782e0000,
+       0x00000000,
+       0x782f0000,
+       0x00000000,
+       0x780f0000,
+       0x00000000,
+       0x78230000,
+       0x00000ea0,
+       0x78210000,
+       0x00000ec0,
+       0x78260000,
+       0x00000000,
+       0x78270000,
+       0x00000000,
+       0x78280000,
+       0x00000000,
+       0x78290000,
+       0x00000000,
+       0x782a0000,
+       0x00000000,
+       0x7b000005,
+       0x00000004,
+       0x00000001,
+       0x00000000,
+       0x00000001,
+       0x00000000,
+       0x00000000,
+       0x05000000,      /* cmds end */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,      /* state start */
+       0x00000000,
+       0x3f800000,
+       0x3f800000,
+       0x3f800000,
+       0x3f800000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,      /* state end */
+};
+
+RO_RENDERSTATE(9);
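RO_RENDERSTATE(9) packages the two arrays above into the read-only descriptor consumed by the render-state setup code. The real definition lives in intel_renderstate.h; the reconstruction below shows its plausible shape, with the field names assumed rather than quoted:

	struct intel_renderstate_rodata {
		const u32 *reloc;	/* byte offsets of reloc dwords, -1 terminated */
		const u32 *batch;	/* command stream plus trailing state data */
		const u32 batch_items;	/* number of dwords in batch[] */
	};

	#define RO_RENDERSTATE(_g)						\
		const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
			.reloc = gen ## _g ## _null_state_relocs,		\
			.batch = gen ## _g ## _null_state_batch,		\
			.batch_items = sizeof(gen ## _g ## _null_state_batch) / 4, \
		}
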
index 0a80e419b5894f1c36a1b6eb0f6078f76fe7c0f4..1d01b51ff058bd597fc636fa3530069973972104 100644 (file)
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
                goto out;
        }
 
-       if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
-               i915_kernel_lost_context(ring->dev);
-       else {
-               ringbuf->head = I915_READ_HEAD(ring);
-               ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ringbuf->space = intel_ring_space(ringbuf);
-               ringbuf->last_retired_head = -1;
-       }
+       ringbuf->head = I915_READ_HEAD(ring);
+       ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+       ringbuf->space = intel_ring_space(ringbuf);
+       ringbuf->last_retired_head = -1;
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
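With the non-KMS special case gone, init_ring_common() now always reconstructs the software ring state from the hardware HEAD and TAIL registers. For reference, the wrap-around computation behind intel_ring_space() looks roughly like the sketch below; the exact pad constant is an assumption (historically I915_RING_FREE_SPACE):

	/* Free space runs from tail forward to head, modulo the ring size,
	 * minus a pad so the tail can never advance right up to the head. */
	static inline int ring_space_sketch(int head, int tail, int size)
	{
		int space = head - (tail + 64 /* assumed pad */);

		if (space < 0)
			space += size;
		return space;
	}
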
@@ -665,76 +661,109 @@ err:
        return ret;
 }
 
-static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
-                                      u32 addr, u32 value)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+                                      struct intel_context *ctx)
 {
+       int ret, i;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_workarounds *w = &dev_priv->workarounds;
 
-       if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
-               return;
+       if (WARN_ON(w->count == 0))
+               return 0;
 
-       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit(ring, addr);
-       intel_ring_emit(ring, value);
+       ring->gpu_caches_dirty = true;
+       ret = intel_ring_flush_all_caches(ring);
+       if (ret)
+               return ret;
 
-       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
-       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
-       /* value is updated with the status of remaining bits of this
-        * register when it is read from debugfs file
-        */
-       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
-       dev_priv->num_wa_regs++;
+       ret = intel_ring_begin(ring, (w->count * 2 + 2));
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+       for (i = 0; i < w->count; i++) {
+               intel_ring_emit(ring, w->reg[i].addr);
+               intel_ring_emit(ring, w->reg[i].value);
+       }
+       intel_ring_emit(ring, MI_NOOP);
+
+       intel_ring_advance(ring);
+
+       ring->gpu_caches_dirty = true;
+       ret = intel_ring_flush_all_caches(ring);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+       return 0;
+}
+
+static int wa_add(struct drm_i915_private *dev_priv,
+                 const u32 addr, const u32 val, const u32 mask)
+{
+       const u32 idx = dev_priv->workarounds.count;
+
+       if (WARN_ON(idx >= I915_MAX_WA_REGS))
+               return -ENOSPC;
+
+       dev_priv->workarounds.reg[idx].addr = addr;
+       dev_priv->workarounds.reg[idx].value = val;
+       dev_priv->workarounds.reg[idx].mask = mask;
 
-       return;
+       dev_priv->workarounds.count++;
+
+       return 0;
 }
 
+#define WA_REG(addr, val, mask) { \
+               const int r = wa_add(dev_priv, (addr), (val), (mask)); \
+               if (r) \
+                       return r; \
+       }
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+       WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+       WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
+
+#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
+
 static int bdw_init_workarounds(struct intel_engine_cs *ring)
 {
-       int ret;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /*
-        * workarounds applied in this fn are part of register state context,
-        * they need to be re-initialized followed by gpu reset, suspend/resume,
-        * module reload.
-        */
-       dev_priv->num_wa_regs = 0;
-       memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
-
-       /*
-        * update the number of dwords required based on the
-        * actual number of workarounds applied
-        */
-       ret = intel_ring_begin(ring, 18);
-       if (ret)
-               return ret;
-
        /* WaDisablePartialInstShootdown:bdw */
-       /* WaDisableThreadStallDopClockGating:bdw */
-       /* FIXME: Unclear whether we really need this on production bdw. */
-       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
-                          _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
-                                            | STALL_DOP_GATING_DISABLE));
+       /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+                         STALL_DOP_GATING_DISABLE);
 
-       /* WaDisableDopClockGating:bdw May not be needed for production */
-       intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
-                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       /* WaDisableDopClockGating:bdw */
+       WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+                         DOP_CLOCK_GATING_DISABLE);
 
-       intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-                          _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                         GEN8_SAMPLER_POWER_BYPASS_DIS);
 
        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
-       intel_ring_emit_wa(ring, HDC_CHICKEN0,
-                          _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+       /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT |
+                         (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        /* Wa4x4STCOptimizationDisable:bdw */
-       intel_ring_emit_wa(ring, CACHE_MODE_1,
-                          _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+       WA_SET_BIT_MASKED(CACHE_MODE_1,
+                         GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 
        /*
         * BSpec recommends 8x4 when MSAA is used,
@@ -744,52 +773,50 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
-       intel_ring_emit_wa(ring, GEN7_GT_MODE,
-                          GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
-
-       intel_ring_advance(ring);
-
-       DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
-                        dev_priv->num_wa_regs);
+       WA_SET_BIT_MASKED(GEN7_GT_MODE,
+                         GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
        return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *ring)
 {
-       int ret;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /*
-        * workarounds applied in this fn are part of register state context,
-        * they need to be re-initialized followed by gpu reset, suspend/resume,
-        * module reload.
+       /* WaDisablePartialInstShootdown:chv */
+       /* WaDisableThreadStallDopClockGating:chv */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+                         STALL_DOP_GATING_DISABLE);
+
+       /* Use Force Non-Coherent whenever executing a 3D context. This is a
+        * workaround for a possible hang in the unlikely event a TLB
+        * invalidation occurs during a PSD flush.
         */
-       dev_priv->num_wa_regs = 0;
-       memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+       /* WaForceEnableNonCoherent:chv */
+       /* WaHdcDisableFetchWhenMasked:chv */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT |
+                         HDC_DONOT_FETCH_MEM_WHEN_MASKED);
 
-       ret = intel_ring_begin(ring, 12);
-       if (ret)
-               return ret;
+       return 0;
+}
 
-       /* WaDisablePartialInstShootdown:chv */
-       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
-                          _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* WaDisableThreadStallDopClockGating:chv */
-       intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
-                          _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+       WARN_ON(ring->id != RCS);
 
-       /* WaDisableDopClockGating:chv (pre-production hw) */
-       intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
-                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       dev_priv->workarounds.count = 0;
 
-       /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
-       intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-                          _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+       if (IS_BROADWELL(dev))
+               return bdw_init_workarounds(ring);
 
-       intel_ring_advance(ring);
+       if (IS_CHERRYVIEW(dev))
+               return chv_init_workarounds(ring);
 
        return 0;
 }
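The block above replaces direct ring emission with a recorded table: the WA_* macros all funnel into wa_add(), which stores an addr/value/mask triple in dev_priv->workarounds, and intel_ring_workarounds_emit() later replays the whole table from ring->init_context with a single MI_LOAD_REGISTER_IMM(count). A condensed view of the round trip, with the register and bit names invented purely for illustration:

	/* Record phase, during init_workarounds_ring(): */
	WA_SET_BIT_MASKED(EXAMPLE_CHICKEN_REG, EXAMPLE_WA_BIT);
	/* expands to roughly:
	 *   wa_add(dev_priv, EXAMPLE_CHICKEN_REG,
	 *          _MASKED_BIT_ENABLE(EXAMPLE_WA_BIT),
	 *          EXAMPLE_WA_BIT & 0xffff);
	 */

	/* Replay phase, from the context-init path: */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

Recording the per-register mask also lets a checker (such as a debugfs readout) compare only the bits each workaround actually owns after a reset or resume, which the old open-coded emission tracked by hand.
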
@@ -812,7 +839,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
         */
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
@@ -849,7 +876,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
        if (HAS_L3_DPF(dev))
                I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
-       return ret;
+       return init_workarounds_ring(ring);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -1186,7 +1213,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1244,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (!intel_irqs_enabled(dev_priv))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1281,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (!intel_irqs_enabled(dev_priv))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1415,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
-              return false;
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+               return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1458,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1478,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
-               return;
-
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1493,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1694,13 +1718,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
        return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-       if (!ringbuf->obj)
-               return;
-
        iounmap(ringbuf->virtual_start);
+       ringbuf->virtual_start = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+                                    struct intel_ringbuffer *ringbuf)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_gem_object *obj = ringbuf->obj;
+       int ret;
+
+       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret) {
+               i915_gem_object_ggtt_unpin(obj);
+               return ret;
+       }
+
+       ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+                       i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+       if (ringbuf->virtual_start == NULL) {
+               i915_gem_object_ggtt_unpin(obj);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
        drm_gem_object_unreference(&ringbuf->obj->base);
        ringbuf->obj = NULL;
 }
@@ -1708,12 +1761,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
-       int ret;
-
-       if (ringbuf->obj)
-               return 0;
 
        obj = NULL;
        if (!HAS_LLC(dev))
@@ -1726,30 +1774,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-       if (ret)
-               goto err_unref;
-
-       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-       if (ret)
-               goto err_unpin;
-
-       ringbuf->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-                               ringbuf->size);
-       if (ringbuf->virtual_start == NULL) {
-               ret = -EINVAL;
-               goto err_unpin;
-       }
-
        ringbuf->obj = obj;
-       return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-       return ret;
+       return 0;
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
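Allocation and mapping are now decoupled: intel_alloc_ringbuffer_obj() only creates the backing object, while intel_pin_and_map_ringbuffer_obj() pins it into the mappable GGTT, moves it to the GTT domain, and ioremaps it, so a context can keep its ringbuffer allocated while it is unpinned. The expected calling sequence, sketched with error handling trimmed:

	if (ringbuf->obj == NULL)
		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);	/* object only */
	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);	/* pin + ioremap_wc */
	...
	intel_unpin_ringbuffer_obj(ringbuf);	/* iounmap + unpin, object kept */
	intel_destroy_ringbuffer_obj(ringbuf);	/* drop the final reference */
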
@@ -1786,10 +1813,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        goto error;
        }
 
-       ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-               goto error;
+       if (ringbuf->obj == NULL) {
+               ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+               if (ret) {
+                       DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+                                       ring->name, ret);
+                       goto error;
+               }
+
+               ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+               if (ret) {
+                       DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+                                       ring->name, ret);
+                       intel_destroy_ringbuffer_obj(ringbuf);
+                       goto error;
+               }
        }
 
        /* Workaround an erratum on the i830 which causes a hang if
@@ -1818,15 +1856,19 @@ error:
 
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct drm_i915_private *dev_priv;
+       struct intel_ringbuffer *ringbuf;
 
        if (!intel_ring_initialized(ring))
                return;
 
+       dev_priv = to_i915(ring->dev);
+       ringbuf = ring->buffer;
+
        intel_stop_ring_buffer(ring);
        WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
+       intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
@@ -1912,13 +1954,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
                        break;
                }
 
-               if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
-                   dev->primary->master) {
-                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-                       if (master_priv->sarea_priv)
-                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-               }
-
                msleep(1);
 
                if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2229,6 +2264,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
                           u32 invalidate, u32 flush)
 {
        struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
 
@@ -2259,8 +2295,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
        }
        intel_ring_advance(ring);
 
-       if (IS_GEN7(dev) && !invalidate && flush)
-               return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+       if (!invalidate && flush) {
+               if (IS_GEN7(dev))
+                       return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+               else if (IS_BROADWELL(dev))
+                       dev_priv->fbc.need_sw_cache_clean = true;
+       }
 
        return 0;
 }
@@ -2293,10 +2333,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                                        dev_priv->semaphore_obj = obj;
                        }
                }
-               if (IS_CHERRYVIEW(dev))
-                       ring->init_context = chv_init_workarounds;
-               else
-                       ring->init_context = bdw_init_workarounds;
+
+               ring->init_context = intel_ring_workarounds_emit;
                ring->add_request = gen6_add_request;
                ring->flush = gen8_render_ring_flush;
                ring->irq_get = gen8_ring_get_irq;
@@ -2406,91 +2444,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       int ret;
-
-       if (ringbuf == NULL) {
-               ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-               if (!ringbuf)
-                       return -ENOMEM;
-               ring->buffer = ringbuf;
-       }
-
-       ring->name = "render ring";
-       ring->id = RCS;
-       ring->mmio_base = RENDER_RING_BASE;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               /* non-kms not supported on gen6+ */
-               ret = -ENODEV;
-               goto err_ringbuf;
-       }
-
-       /* Note: gem is not supported on gen5/ilk without kms (the corresponding
-        * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
-        * the special gen5 functions. */
-       ring->add_request = i9xx_add_request;
-       if (INTEL_INFO(dev)->gen < 4)
-               ring->flush = gen2_render_ring_flush;
-       else
-               ring->flush = gen4_render_ring_flush;
-       ring->get_seqno = ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
-       if (IS_GEN2(dev)) {
-               ring->irq_get = i8xx_ring_get_irq;
-               ring->irq_put = i8xx_ring_put_irq;
-       } else {
-               ring->irq_get = i9xx_ring_get_irq;
-               ring->irq_put = i9xx_ring_put_irq;
-       }
-       ring->irq_enable_mask = I915_USER_INTERRUPT;
-       ring->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 4)
-               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev) || IS_845G(dev))
-               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-       else
-               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-       ring->init = init_render_ring;
-       ring->cleanup = render_ring_cleanup;
-
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-
-       ringbuf->size = size;
-       ringbuf->effective_size = ringbuf->size;
-       if (IS_I830(ring->dev) || IS_845G(ring->dev))
-               ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
-       ringbuf->virtual_start = ioremap_wc(start, size);
-       if (ringbuf->virtual_start == NULL) {
-               DRM_ERROR("can not ioremap virtual address for"
-                         " ring buffer\n");
-               ret = -ENOMEM;
-               goto err_ringbuf;
-       }
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               ret = init_phys_status_page(ring);
-               if (ret)
-                       goto err_vstart;
-       }
-
-       return 0;
-
-err_vstart:
-       iounmap(ringbuf->virtual_start);
-err_ringbuf:
-       kfree(ringbuf);
-       ring->buffer = NULL;
-       return ret;
-}
-
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index 96479c89f4bda9c2e6af0084e75b6785eee6ccfa..fe426cff598ba5f4c381cdc134a58fc857090086 100644 (file)
@@ -148,7 +148,8 @@ struct  intel_engine_cs {
 
        int             (*init)(struct intel_engine_cs *ring);
 
-       int             (*init_context)(struct intel_engine_cs *ring);
+       int             (*init_context)(struct intel_engine_cs *ring,
+                                       struct intel_context *ctx);
 
        void            (*write_tail)(struct intel_engine_cs *ring,
                                      u32 value);
@@ -235,6 +236,7 @@ struct  intel_engine_cs {
        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
+       struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX     0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+                                    struct intel_ringbuffer *ringbuf);
 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
 u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
 void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
+int init_workarounds_ring(struct intel_engine_cs *ring);
+
 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
        return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
                ring->trace_irq_seqno = seqno;
 }
 
-/* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
-
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644 (file)
index 0000000..f5a78d5
--- /dev/null
@@ -0,0 +1,1406 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/i915_powerwell.h>
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
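+
+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only):
+ *
+ *     intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ *     ... access the pipe A display registers ...
+ *     intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ */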
+
+static struct i915_power_domains *hsw_pwr;
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+       for (i = 0;                                                     \
+            i < (power_domains)->power_well_count &&                   \
+                ((power_well) = &(power_domains)->power_wells[i]);     \
+            i++)                                                       \
+               if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+       for (i = (power_domains)->power_well_count - 1;                  \
+            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+            i--)                                                        \
+               if ((power_well)->domains & (domain_mask))
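+
+/*
+ * Wells are walked in array order when enabling and in reverse order when
+ * disabling (hence the _rev variant); see the ordering comment in
+ * intel_power_domains_init() below.
+ */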
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
+                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       bool is_enabled;
+       int i;
+
+       if (dev_priv->pm.suspended)
+               return false;
+
+       power_domains = &dev_priv->power_domains;
+
+       is_enabled = true;
+
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               if (power_well->always_on)
+                       continue;
+
+               if (!power_well->hw_enabled) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+
+       return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       bool ret;
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       ret = __intel_display_power_is_enabled(dev_priv, domain);
+       mutex_unlock(&power_domains->lock);
+
+       return ret;
+}
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This function controls the state
+ * of this little hack. While the initial power domain state is enabled,
+ * runtime pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+                                 bool enable)
+{
+       if (dev_priv->power_domains.init_power_on == enable)
+               return;
+
+       if (enable)
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       else
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       /*
+        * After we re-enable the power well, if we touch VGA register 0x3d5
+        * we'll get unclaimed register interrupts. This stops after we write
+        * anything to the VGA MSR register. The vgacon module uses this
+        * register all the time, so if we unbind our driver and, as a
+        * consequence, bind vgacon, we'll get stuck in an infinite loop at
+        * console_unlock(). So here we touch the VGA MSR register, making
+        * sure vgacon can keep working normally without triggering interrupts
+        * and error messages.
+        */
+       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
+               gen8_irq_power_well_post_enable(dev_priv);
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       bool is_enabled, enable_requested;
+       uint32_t tmp;
+
+       tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+       is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+       enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+       if (enable) {
+               if (!enable_requested)
+                       I915_WRITE(HSW_PWR_WELL_DRIVER,
+                                  HSW_PWR_WELL_ENABLE_REQUEST);
+
+               if (!is_enabled) {
+                       DRM_DEBUG_KMS("Enabling power well\n");
+                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+                                     HSW_PWR_WELL_STATE_ENABLED), 20))
+                               DRM_ERROR("Timeout enabling power well\n");
+                       hsw_power_well_post_enable(dev_priv);
+               }
+
+       } else {
+               if (enable_requested) {
+                       I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+                       POSTING_READ(HSW_PWR_WELL_DRIVER);
+                       DRM_DEBUG_KMS("Requesting to disable the power well\n");
+               }
+       }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+       /*
+        * We're taking over the BIOS, so clear any requests made by it since
+        * the driver is in charge now.
+        */
+       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+                                            struct i915_power_well *power_well)
+{
+       return true;
+}
+
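+/*
+ * Punit power gating is a request/acknowledge protocol: the desired state
+ * is written to PUNIT_REG_PWRGT_CTRL, and PUNIT_REG_PWRGT_STATUS is then
+ * polled until it reflects that state (or the wait times out).
+ */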
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       enum punit_power_well power_well_id = power_well->data;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+                        PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+       ctrl &= ~mask;
+       ctrl |= state;
+       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       int power_well_id = power_well->data;
+       bool enabled = false;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+       if (state == ctrl)
+               enabled = true;
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+       WARN_ON(ctrl != state);
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_enable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /*
+        * During driver initialization/resume we can avoid restoring the
+        * part of the HW/SW state that will be explicitly initialized anyway.
+        */
+       if (dev_priv->power_domains.initializing)
+               return;
+
+       intel_hpd_init(dev_priv);
+
+       i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_disable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+
+       vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum pipe pipe;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       for_each_pipe(dev_priv, pipe)
+               assert_pll_disabled(dev_priv, pipe);
+
+       /* Assert common reset */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               phy = DPIO_PHY0;
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                          DPLL_REFA_CLK_ENABLE_VLV);
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       } else {
+               phy = DPIO_PHY1;
+               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       }
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /* Poll for phypwrgood signal */
+       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+               DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+                  PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               phy = DPIO_PHY0;
+               assert_pll_disabled(dev_priv, PIPE_A);
+               assert_pll_disabled(dev_priv, PIPE_B);
+       } else {
+               phy = DPIO_PHY1;
+               assert_pll_disabled(dev_priv, PIPE_C);
+       }
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+                  ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       enum pipe pipe = power_well->data;
+       bool enabled;
+       u32 state, ctrl;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+       enabled = state == DP_SSS_PWR_ON(pipe);
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+       WARN_ON(ctrl << 16 != state);
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well,
+                                   bool enable)
+{
+       enum pipe pipe = power_well->data;
+       u32 state;
+       u32 ctrl;
+
+       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+       ctrl &= ~DP_SSC_MASK(pipe);
+       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+                                      struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PIPE_A &&
+                    power_well->data != PIPE_B &&
+                    power_well->data != PIPE_C);
+
+       chv_set_pipe_power_well(dev_priv, power_well, true);
+
+       if (power_well->data == PIPE_A) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               valleyview_enable_display_irqs(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+
+               /*
+                * During driver initialization/resume we can avoid restoring the
+                * part of the HW/SW state that will be explicitly initialized anyway.
+                */
+               if (dev_priv->power_domains.initializing)
+                       return;
+
+               intel_hpd_init(dev_priv);
+
+               i915_redisable_vga_power_on(dev_priv->dev);
+       }
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PIPE_A &&
+                    power_well->data != PIPE_B &&
+                    power_well->data != PIPE_C);
+
+       if (power_well->data == PIPE_A) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               valleyview_disable_display_irqs(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+       }
+
+       chv_set_pipe_power_well(dev_priv, power_well, false);
+
+       if (power_well->data == PIPE_A)
+               vlv_power_sequencer_reset(dev_priv);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+       if (power_well->always_on || !i915.disable_power_well) {
+               if (!enabled)
+                       goto mismatch;
+
+               return;
+       }
+
+       if (enabled != (power_well->count > 0))
+               goto mismatch;
+
+       return;
+
+mismatch:
+       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+                 power_well->name, power_well->always_on, enabled,
+                 power_well->count, i915.disable_power_well);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       intel_runtime_pm_get(dev_priv);
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+
+       for_each_power_well(i, power_well, BIT(domain), power_domains) {
+               if (!power_well->count++) {
+                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+                       power_well->ops->enable(dev_priv, power_well);
+                       power_well->hw_enabled = true;
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
+
+       power_domains->domain_use_count[domain]++;
+
+       mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+
+       WARN_ON(!power_domains->domain_use_count[domain]);
+       power_domains->domain_use_count[domain]--;
+
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               WARN_ON(!power_well->count);
+
+               if (!--power_well->count && i915.disable_power_well) {
+                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->hw_enabled = false;
+                       power_well->ops->disable(dev_priv, power_well);
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
+
+       mutex_unlock(&power_domains->lock);
+
+       intel_runtime_pm_put(dev_priv);
+}
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
+       BIT(POWER_DOMAIN_PIPE_A) |                      \
+       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
+       BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_CRT) |                    \
+       BIT(POWER_DOMAIN_PLLS) |                        \
+       BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
+       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_CRT) |            \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_A_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_A) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_B) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_C) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+       .sync_hw = i9xx_always_on_power_well_noop,
+       .enable = i9xx_always_on_power_well_noop,
+       .disable = i9xx_always_on_power_well_noop,
+       .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+       .sync_hw = chv_pipe_power_well_sync_hw,
+       .enable = chv_pipe_power_well_enable,
+       .disable = chv_pipe_power_well_disable,
+       .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = chv_dpio_cmn_power_well_enable,
+       .disable = chv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = hsw_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = HSW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+       },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = BDW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+       },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_display_power_well_enable,
+       .disable = vlv_display_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_dpio_cmn_power_well_enable,
+       .disable = vlv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_power_well_enable,
+       .disable = vlv_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DISP2D,
+               .ops = &vlv_display_power_well_ops,
+       },
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+       },
+       {
+               .name = "dpio-common",
+               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &vlv_dpio_cmn_power_well_ops,
+       },
+};
+
+static struct i915_power_well chv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+#if 0
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DISP2D,
+               .ops = &vlv_display_power_well_ops,
+       },
+#endif
+       {
+               .name = "pipe-a",
+               /*
+                * FIXME: pipe A power well seems to be the new disp2d well.
+                * At least all registers seem to be housed there. Figure
+                * out if this is a temporary situation in pre-production
+                * hardware or a permanent state of affairs.
+                */
+               .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+               .data = PIPE_A,
+               .ops = &chv_pipe_power_well_ops,
+       },
+#if 0
+       {
+               .name = "pipe-b",
+               .domains = CHV_PIPE_B_POWER_DOMAINS,
+               .data = PIPE_B,
+               .ops = &chv_pipe_power_well_ops,
+       },
+       {
+               .name = "pipe-c",
+               .domains = CHV_PIPE_C_POWER_DOMAINS,
+               .data = PIPE_C,
+               .ops = &chv_pipe_power_well_ops,
+       },
+#endif
+       {
+               .name = "dpio-common-bc",
+               /*
+                * XXX: cmnreset for one PHY seems to disturb the other.
+                * As a workaround keep both powered on at the same
+                * time for now.
+                */
+               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &chv_dpio_cmn_power_well_ops,
+       },
+       {
+               .name = "dpio-common-d",
+               /*
+                * XXX: cmnreset for one PHY seems to disturb the other.
+                * As a workaround keep both powered on at the same
+                * time for now.
+                */
+               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+               .ops = &chv_dpio_cmn_power_well_ops,
+       },
+#if 0
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+       },
+       {
+               .name = "dpio-tx-d-01",
+               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+       },
+       {
+               .name = "dpio-tx-d-23",
+               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+       },
+#endif
+};
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+                                                enum punit_power_well power_well_id)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->data == power_well_id)
+                       return power_well;
+       }
+
+       return NULL;
+}
+
+#define set_power_wells(power_domains, __power_wells) ({               \
+       (power_domains)->power_wells = (__power_wells);                 \
+       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       mutex_init(&power_domains->lock);
+
+       /*
+        * The enabling order will be from lower to higher indexed wells,
+        * the disabling order is reversed.
+        */
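+       /*
+        * Note: IS_CHERRYVIEW() is tested before IS_VALLEYVIEW() on
+        * purpose, since Cherryview also reports as Valleyview here (see
+        * the !IS_CHERRYVIEW() guard in intel_power_domains_init_hw()).
+        */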
+       if (IS_HASWELL(dev_priv->dev)) {
+               set_power_wells(power_domains, hsw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_BROADWELL(dev_priv->dev)) {
+               set_power_wells(power_domains, bdw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+               set_power_wells(power_domains, chv_power_wells);
+       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+               set_power_wells(power_domains, vlv_power_wells);
+       } else {
+               set_power_wells(power_domains, i9xx_always_on_power_well);
+       }
+
+       return 0;
+}
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       if (!intel_enable_rc6(dev))
+               return;
+
+       /* Make sure we're not suspended first. */
+       pm_runtime_get_sync(device);
+       pm_runtime_disable(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_disable(dev_priv);
+
+       /* The i915.ko module is still not prepared to be loaded when
+        * the power well is not enabled, so just enable it in case
+        * we're going to unload/reload. */
+       intel_display_set_init_power(dev_priv, true);
+
+       hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       mutex_lock(&power_domains->lock);
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               power_well->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                    power_well);
+       }
+       mutex_unlock(&power_domains->lock);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+       struct i915_power_well *disp2d =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+       /* If the display might already be active, skip this */
+       if (cmn->ops->is_enabled(dev_priv, cmn) &&
+           disp2d->ops->is_enabled(dev_priv, disp2d) &&
+           I915_READ(DPIO_CTL) & DPIO_CMNRST)
+               return;
+
+       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+       /* cmnlane needs DPLL registers */
+       disp2d->ops->enable(dev_priv, disp2d);
+
+       /*
+        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+        * Need to assert and de-assert PHY SB reset by gating the
+        * common lane power, then un-gating it.
+        * Simply ungating isn't sufficient to reset the PHY far enough
+        * to get ports and lanes running.
+        */
+       cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       power_domains->initializing = true;
+
+       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+               mutex_lock(&power_domains->lock);
+               vlv_cmnlane_wa(dev_priv);
+               mutex_unlock(&power_domains->lock);
+       }
+
+       /* For now, we need the power well to be always enabled. */
+       intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_resume(dev_priv);
+       power_domains->initializing = false;
+}
+
+/**
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a power domain reference for the auxiliary power domain
+ * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
+ * parents are powered up. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_aux_display_runtime_put() to release the reference again.
+ */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the auxiliary power domain reference obtained by
+ * intel_aux_display_runtime_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_get_sync(device);
+       WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on.  Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code where this is used to grab runtime pm references for
+ * delayed setup down in work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+       pm_runtime_get_noresume(device);
+}
+
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_put_autosuspend(device);
+}
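+
+/*
+ * A minimal sketch of the expected calling pattern (hypothetical GEM
+ * caller, for illustration only):
+ *
+ *     intel_runtime_pm_get(dev_priv);
+ *     ... touch the GTT or GT registers ...
+ *     intel_runtime_pm_put(dev_priv);
+ */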
+
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does not currently enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_set_active(device);
+
+       /*
+        * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+        * requirement.
+        */
+       if (!intel_enable_rc6(dev)) {
+               DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+               return;
+       }
+
+       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_use_autosuspend(device);
+
+       pm_runtime_put_autosuspend(device);
+}
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
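+
+/*
+ * A sketch of how the audio driver side might use this private interface
+ * (hypothetical caller, for illustration only):
+ *
+ *     if (i915_request_power_well() == 0) {
+ *             int cdclk = i915_get_cdclk_freq();
+ *             ... derive the HDA link rate from cdclk ...
+ *             i915_release_power_well();
+ *     }
+ */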
index 9350edd6728d4706b82d13f1133ad22e09a0708b..6d7a277458b5cf0f24a3e97f2b60463f75585d2e 100644 (file)
@@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
        return !list_empty(&connector->probed_modes);
 }
 
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
-       struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-       struct drm_device *dev = connector->dev;
-
-       if (intel_sdvo_connector->left)
-               drm_property_destroy(dev, intel_sdvo_connector->left);
-       if (intel_sdvo_connector->right)
-               drm_property_destroy(dev, intel_sdvo_connector->right);
-       if (intel_sdvo_connector->top)
-               drm_property_destroy(dev, intel_sdvo_connector->top);
-       if (intel_sdvo_connector->bottom)
-               drm_property_destroy(dev, intel_sdvo_connector->bottom);
-       if (intel_sdvo_connector->hpos)
-               drm_property_destroy(dev, intel_sdvo_connector->hpos);
-       if (intel_sdvo_connector->vpos)
-               drm_property_destroy(dev, intel_sdvo_connector->vpos);
-       if (intel_sdvo_connector->saturation)
-               drm_property_destroy(dev, intel_sdvo_connector->saturation);
-       if (intel_sdvo_connector->contrast)
-               drm_property_destroy(dev, intel_sdvo_connector->contrast);
-       if (intel_sdvo_connector->hue)
-               drm_property_destroy(dev, intel_sdvo_connector->hue);
-       if (intel_sdvo_connector->sharpness)
-               drm_property_destroy(dev, intel_sdvo_connector->sharpness);
-       if (intel_sdvo_connector->flicker_filter)
-               drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
-       if (intel_sdvo_connector->flicker_filter_2d)
-               drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
-       if (intel_sdvo_connector->flicker_filter_adaptive)
-               drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
-       if (intel_sdvo_connector->tv_luma_filter)
-               drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
-       if (intel_sdvo_connector->tv_chroma_filter)
-               drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
-       if (intel_sdvo_connector->dot_crawl)
-               drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
-       if (intel_sdvo_connector->brightness)
-               drm_property_destroy(dev, intel_sdvo_connector->brightness);
-}
-
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
-       if (intel_sdvo_connector->tv_format)
-               drm_property_destroy(connector->dev,
-                                    intel_sdvo_connector->tv_format);
-
-       intel_sdvo_destroy_enhance_property(connector);
        drm_connector_cleanup(connector);
        kfree(intel_sdvo_connector);
 }
index 07a74ef589bd00636896510d245d6e149a49d653..7d9c340f7693a8ff950541213e7a0d4e8f61aa06 100644
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static bool
+format_is_yuv(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_YVYU:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
 {
        /* paranoia */
@@ -46,7 +60,23 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
        return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
 }
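
As a quick sanity check of usecs_to_scanlines() (figures assumed for illustration, not taken from this patch): a 1080p mode with mode->crtc_clock = 148500 (kHz) and mode->crtc_htotal = 2200 turns a 100 us budget into DIV_ROUND_UP(100 * 148500, 1000 * 2200) = DIV_ROUND_UP(14850000, 2200000) = 7 scanlines.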
 
-static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+ * @start_vbl_count: vblank counter return pointer used for error checking
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically with respect to vblank. If the next vblank happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays. The value written to @start_vbl_count should be
+ * supplied to intel_pipe_update_end() for error checking.
+ *
+ * Return: true if the call was successful
+ */
+bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 {
        struct drm_device *dev = crtc->base.dev;
        const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -56,8 +86,6 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
        DEFINE_WAIT(wait);
 
-       WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
-
        vblank_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vblank_start = DIV_ROUND_UP(vblank_start, 2);
@@ -112,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
        return true;
 }
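
Together with intel_pipe_update_end(), documented just below, the intended calling pattern is roughly the following sketch (error handling elided; the register writes are placeholders):

	uint32_t start_vbl_count;

	if (intel_pipe_update_start(intel_crtc, &start_vbl_count)) {
		/* interrupts are off here, so keep this section short */
		/* ... write the double-buffered plane registers ... */
		intel_pipe_update_end(intel_crtc, start_vbl_count);
	}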
 
-static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @crtc: the crtc of which the registers were updated
+ * @start_vbl_count: start vblank counter (used for error checking)
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies, using the value of @start_vbl_count,
+ * that the update actually completed before a vblank.
+ */
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
 {
        struct drm_device *dev = crtc->base.dev;
        enum pipe pipe = crtc->pipe;
@@ -138,6 +175,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
                I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
 }
 
+static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+       u32 plane_ctl, stride;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+       /* Mask out pixel format bits in case we change it */
+       plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+       plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+       plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+       plane_ctl &= ~PLANE_CTL_TILED_MASK;
+       plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+       plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
+
+       /* Trickle feed has to be enabled */
+       plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       /*
+        * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+        * to be already pre-multiplied. We need to add a knob (or a different
+        * DRM_FORMAT) for user-space to configure that.
+        */
+       case DRM_FORMAT_ABGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ORDER_RGBX |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_ARGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_YUYV:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+               break;
+       default:
+               BUG();
+       }
+
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+       if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+               plane_ctl |= PLANE_CTL_ROTATE_180;
+
+       plane_ctl |= PLANE_CTL_ENABLE;
+       plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+       intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+                                      pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+       I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+       I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+       I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+       I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+       POSTING_READ(PLANE_SURF(pipe, plane));
+}
+
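
The stride shifts above imply 64-byte units for linear buffers and 512-byte units for X-tiled ones; a worked example under assumed framebuffer parameters:

	/* Assume a 1920x1080 XRGB8888 framebuffer, pitches[0] = 1920 * 4 = 7680:
	 *   linear:  stride = 7680 >> 6 = 120  (64-byte units)
	 *   X-tiled: stride = 7680 >> 9 =  15  (512-byte units)
	 */
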
+static void
+skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+
+       I915_WRITE(PLANE_CTL(pipe, plane),
+                  I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+
+       /* Activate double buffered register update */
+       I915_WRITE(PLANE_CTL(pipe, plane), 0);
+       POSTING_READ(PLANE_CTL(pipe, plane));
+
+       intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+}
+
+static int
+skl_update_colorkey(struct drm_plane *drm_plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+
+       I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+       I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+       I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+       plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+
+       POSTING_READ(PLANE_CTL(pipe, plane));
+
+       return 0;
+}
+
+static void
+skl_get_colorkey(struct drm_plane *drm_plane,
+                struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+
+       key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+       key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+       key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+       switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+       case PLANE_CTL_KEY_ENABLE_DESTINATION:
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+               break;
+       case PLANE_CTL_KEY_ENABLE_SOURCE:
+               key->flags = I915_SET_COLORKEY_SOURCE;
+               break;
+       default:
+               key->flags = I915_SET_COLORKEY_NONE;
+       }
+}
+
+static void
+chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+{
+       struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+       int plane = intel_plane->plane;
+
+       /* RGB data seems to always bypass the CSC */
+       if (!format_is_yuv(format))
+               return;
+
+       /*
+        * BT.601 limited range YCbCr -> full range RGB
+        *
+        * |r|   | 6537 4769     0|   |cr  |
+        * |g| = |-3330 4769 -1605| x |y-64|
+        * |b|   |    0 4769  8263|   |cb  |
+        *
+        * Cb and Cr apparently come in as signed already, so no
+        * need for any offset. For Y we need to remove the offset.
+        */
+       I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+       I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+       I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+       I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
+       I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
+       I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
+       I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
+       I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
+
+       I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+       I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+       I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+       I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
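
The coefficients above are consistent with the textbook BT.601 limited-range expansion scaled by 4096 (shown for reference; values are rounded to the nearest integer, so the register values may differ by a count):

	/* BT.601 limited-range YCbCr -> full-range RGB, x4096 fixed point:
	 *   1.164 * 4096 ~= 4769   (Y gain, 255/219 range expansion)
	 *   1.596 * 4096 ~= 6537   (Cr -> R)
	 *   0.813 * 4096 ~= 3330   (Cr -> G, applied negatively)
	 *   0.392 * 4096 ~= 1605   (Cb -> G, applied negatively)
	 *   2.017 * 4096 ~= 8263   (Cb -> B)
	 * The clamps are the 10-bit limited ranges: Y in [64, 940]
	 * (16..235 scaled by 4) and chroma in [-448, 448] about the midpoint.
	 */
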
 static void
 vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
@@ -249,6 +506,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 
        intel_update_primary_plane(intel_crtc);
 
+       if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+               chv_update_csc(intel_plane, fb->pixel_format);
+
        I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
        I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
@@ -257,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        else
                I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
 
+       I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
        I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@@ -821,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
                key->flags = I915_SET_COLORKEY_NONE;
 }
 
-static bool
-format_is_yuv(uint32_t format)
-{
-       switch (format) {
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_YVYU:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static bool colorkey_enabled(struct intel_plane *intel_plane)
 {
        struct drm_intel_sprite_colorkey key;
@@ -845,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
 }
 
 static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                  unsigned int crtc_w, unsigned int crtc_h,
-                  uint32_t src_x, uint32_t src_y,
-                  uint32_t src_w, uint32_t src_h)
+intel_check_sprite_plane(struct drm_plane *plane,
+                        struct intel_plane_state *state)
 {
-       struct drm_device *dev = plane->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       enum pipe pipe = intel_crtc->pipe;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
-       struct drm_i915_gem_object *old_obj = intel_plane->obj;
-       int ret;
-       bool primary_enabled;
-       bool visible;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       int crtc_x, crtc_y;
+       unsigned int crtc_w, crtc_h;
+       uint32_t src_x, src_y, src_w, src_h;
+       struct drm_rect *src = &state->src;
+       struct drm_rect *dst = &state->dst;
+       struct drm_rect *orig_src = &state->orig_src;
+       const struct drm_rect *clip = &state->clip;
        int hscale, vscale;
        int max_scale, min_scale;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       struct drm_rect src = {
-               /* sample coordinates in 16.16 fixed point */
-               .x1 = src_x,
-               .x2 = src_x + src_w,
-               .y1 = src_y,
-               .y2 = src_y + src_h,
-       };
-       struct drm_rect dst = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .x2 = crtc_x + crtc_w,
-               .y1 = crtc_y,
-               .y2 = crtc_y + crtc_h,
-       };
-       const struct drm_rect clip = {
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       const struct {
-               int crtc_x, crtc_y;
-               unsigned int crtc_w, crtc_h;
-               uint32_t src_x, src_y, src_w, src_h;
-       } orig = {
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-       };
 
        /* Don't modify another pipe's plane */
        if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        max_scale = intel_plane->max_downscale << 16;
        min_scale = intel_plane->can_scale ? 1 : (1 << 16);
 
-       drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+       drm_rect_rotate(src, fb->width << 16, fb->height << 16,
                        intel_plane->rotation);
 
-       hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+       hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(hscale < 0);
 
-       vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+       vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(vscale < 0);
 
-       visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+       state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
-       crtc_x = dst.x1;
-       crtc_y = dst.y1;
-       crtc_w = drm_rect_width(&dst);
-       crtc_h = drm_rect_height(&dst);
+       crtc_x = dst->x1;
+       crtc_y = dst->y1;
+       crtc_w = drm_rect_width(dst);
+       crtc_h = drm_rect_height(dst);
 
-       if (visible) {
+       if (state->visible) {
                /* check again in case clipping clamped the results */
-               hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+               hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
                if (hscale < 0) {
                        DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
-                       drm_rect_debug_print(&src, true);
-                       drm_rect_debug_print(&dst, false);
+                       drm_rect_debug_print(src, true);
+                       drm_rect_debug_print(dst, false);
 
                        return hscale;
                }
 
-               vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+               vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
                if (vscale < 0) {
                        DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
-                       drm_rect_debug_print(&src, true);
-                       drm_rect_debug_print(&dst, false);
+                       drm_rect_debug_print(src, true);
+                       drm_rect_debug_print(dst, false);
 
                        return vscale;
                }
 
                /* Make the source viewport size an exact multiple of the scaling factors. */
-               drm_rect_adjust_size(&src,
-                                    drm_rect_width(&dst) * hscale - drm_rect_width(&src),
-                                    drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+               drm_rect_adjust_size(src,
+                                    drm_rect_width(dst) * hscale - drm_rect_width(src),
+                                    drm_rect_height(dst) * vscale - drm_rect_height(src));
 
-               drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+               drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
                                    intel_plane->rotation);
 
                /* sanity check to make sure the src viewport wasn't enlarged */
-               WARN_ON(src.x1 < (int) src_x ||
-                       src.y1 < (int) src_y ||
-                       src.x2 > (int) (src_x + src_w) ||
-                       src.y2 > (int) (src_y + src_h));
+               WARN_ON(src->x1 < (int) orig_src->x1 ||
+                       src->y1 < (int) orig_src->y1 ||
+                       src->x2 > (int) orig_src->x2 ||
+                       src->y2 > (int) orig_src->y2);
 
                /*
                 * Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                 * increase the source viewport size, because that could
                 * push the downscaling factor out of bounds.
                 */
-               src_x = src.x1 >> 16;
-               src_w = drm_rect_width(&src) >> 16;
-               src_y = src.y1 >> 16;
-               src_h = drm_rect_height(&src) >> 16;
+               src_x = src->x1 >> 16;
+               src_w = drm_rect_width(src) >> 16;
+               src_y = src->y1 >> 16;
+               src_h = drm_rect_height(src) >> 16;
 
                if (format_is_yuv(fb->pixel_format)) {
                        src_x &= ~1;
@@ -1000,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                                crtc_w &= ~1;
 
                        if (crtc_w == 0)
-                               visible = false;
+                               state->visible = false;
                }
        }
 
        /* Check size restrictions when scaling */
-       if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+       if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
                unsigned int width_bytes;
 
                WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                /* FIXME interlacing min height is 6 */
 
                if (crtc_w < 3 || crtc_h < 3)
-                       visible = false;
+                       state->visible = false;
 
                if (src_w < 3 || src_h < 3)
-                       visible = false;
+                       state->visible = false;
 
-               width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+               width_bytes = ((src_x * pixel_size) & 63) +
+                                       src_w * pixel_size;
 
                if (src_w > 2048 || src_h > 2048 ||
                    width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                }
        }
 
-       dst.x1 = crtc_x;
-       dst.x2 = crtc_x + crtc_w;
-       dst.y1 = crtc_y;
-       dst.y2 = crtc_y + crtc_h;
+       if (state->visible) {
+               src->x1 = src_x;
+               src->x2 = src_x + src_w;
+               src->y1 = src_y;
+               src->y2 = src_y + src_h;
+       }
 
-       /*
-        * If the sprite is completely covering the primary plane,
-        * we can disable the primary and save power.
-        */
-       primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
-       WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+       dst->x1 = crtc_x;
+       dst->x2 = crtc_x + crtc_w;
+       dst->y1 = crtc_y;
+       dst->y2 = crtc_y + crtc_h;
 
-       mutex_lock(&dev->struct_mutex);
+       return 0;
+}
 
-       /* Note that this will apply the VT-d workaround for scanouts,
-        * which is more restrictive than required for sprites. (The
-        * primary plane requires 256KiB alignment with 64 PTE padding,
-        * the sprite planes only require 128KiB alignment and 32 PTE padding.
-        */
-       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+static int
+intel_prepare_sprite_plane(struct drm_plane *plane,
+                          struct intel_plane_state *state)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_crtc *crtc = state->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       enum pipe pipe = intel_crtc->pipe;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *old_obj = intel_plane->obj;
+       int ret;
 
-       i915_gem_track_fb(old_obj, obj,
-                         INTEL_FRONTBUFFER_SPRITE(pipe));
-       mutex_unlock(&dev->struct_mutex);
+       if (old_obj != obj) {
+               mutex_lock(&dev->struct_mutex);
 
-       if (ret)
-               return ret;
+               /* Note that this will apply the VT-d workaround for scanouts,
+                * which is more restrictive than required for sprites. (The
+                * primary plane requires 256KiB alignment with 64 PTE padding,
+                * the sprite planes only require 128KiB alignment and 32 PTE
+                * padding.
+                */
+               ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+               if (ret == 0)
+                       i915_gem_track_fb(old_obj, obj,
+                                         INTEL_FRONTBUFFER_SPRITE(pipe));
+               mutex_unlock(&dev->struct_mutex);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
 
-       intel_plane->crtc_x = orig.crtc_x;
-       intel_plane->crtc_y = orig.crtc_y;
-       intel_plane->crtc_w = orig.crtc_w;
-       intel_plane->crtc_h = orig.crtc_h;
-       intel_plane->src_x = orig.src_x;
-       intel_plane->src_y = orig.src_y;
-       intel_plane->src_w = orig.src_w;
-       intel_plane->src_h = orig.src_h;
+static void
+intel_commit_sprite_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_crtc *crtc = state->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       enum pipe pipe = intel_crtc->pipe;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *old_obj = intel_plane->obj;
+       int crtc_x, crtc_y;
+       unsigned int crtc_w, crtc_h;
+       uint32_t src_x, src_y, src_w, src_h;
+       struct drm_rect *dst = &state->dst;
+       const struct drm_rect *clip = &state->clip;
+       bool primary_enabled;
+
+       /*
+        * If the sprite is completely covering the primary plane,
+        * we can disable the primary and save power.
+        */
+       primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+       WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
+
+       intel_plane->crtc_x = state->orig_dst.x1;
+       intel_plane->crtc_y = state->orig_dst.y1;
+       intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+       intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+       intel_plane->src_x = state->orig_src.x1;
+       intel_plane->src_y = state->orig_src.y1;
+       intel_plane->src_w = drm_rect_width(&state->orig_src);
+       intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
 
        if (intel_crtc->active) {
@@ -1076,12 +1339,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                if (primary_was_enabled && !primary_enabled)
                        intel_pre_disable_primary(crtc);
 
-               if (visible)
+               if (state->visible) {
+                       crtc_x = state->dst.x1;
+                       crtc_y = state->dst.y1;
+                       crtc_w = drm_rect_width(&state->dst);
+                       crtc_h = drm_rect_height(&state->dst);
+                       src_x = state->src.x1;
+                       src_y = state->src.y1;
+                       src_w = drm_rect_width(&state->src);
+                       src_h = drm_rect_height(&state->src);
                        intel_plane->update_plane(plane, crtc, fb, obj,
                                                  crtc_x, crtc_y, crtc_w, crtc_h,
                                                  src_x, src_y, src_w, src_h);
-               else
+               } else {
                        intel_plane->disable_plane(plane, crtc);
+               }
+
                intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
@@ -1090,21 +1363,65 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        }
 
        /* Unpin old obj after new one is active to avoid ugliness */
-       if (old_obj) {
+       if (old_obj && old_obj != obj) {
+
                /*
                 * It's fairly common to simply update the position of
                 * an existing object.  In that case, we don't need to
                 * wait for vblank to avoid ugliness, we only need to
                 * do the pin & ref bookkeeping.
                 */
-               if (old_obj != obj && intel_crtc->active)
+               if (intel_crtc->active)
                        intel_wait_for_vblank(dev, intel_crtc->pipe);
 
                mutex_lock(&dev->struct_mutex);
                intel_unpin_fb_obj(old_obj);
                mutex_unlock(&dev->struct_mutex);
        }
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                  unsigned int crtc_w, unsigned int crtc_h,
+                  uint32_t src_x, uint32_t src_y,
+                  uint32_t src_w, uint32_t src_h)
+{
+       struct intel_plane_state state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int ret;
 
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_sprite_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       ret = intel_prepare_sprite_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       intel_commit_sprite_plane(plane, &state);
        return 0;
 }
 
@@ -1305,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
+static uint32_t skl_plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
@@ -1368,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                }
                break;
-
+       case 9:
+               /*
+                * FIXME: Skylake planes can be scaled (with some restrictions),
+                * but this is for another time.
+                */
+               intel_plane->can_scale = false;
+               intel_plane->max_downscale = 1;
+               intel_plane->update_plane = skl_update_plane;
+               intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->update_colorkey = skl_update_colorkey;
+               intel_plane->get_colorkey = skl_get_colorkey;
+
+               plane_formats = skl_plane_formats;
+               num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+               break;
        default:
                kfree(intel_plane);
                return -ENODEV;
index c14341ca3ef9386a0be1b432206f00c15ddae09b..6f5f59b880f5ecf69bc5645e3f8af60d1ee04793 100644
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
        int type;
 
        /* Disable TV interrupts around load detect or we'll recurse */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                i915_disable_pipestat(dev_priv, 0,
                                      PIPE_HOTPLUG_INTERRUPT_STATUS |
                                      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                i915_enable_pipestat(dev_priv, 0,
                                     PIPE_HOTPLUG_INTERRUPT_STATUS |
                                     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        return type;
index 918b761639651042edebf1ab7a3d886973774a15..46de8d75b4bf6fe9928bc0656a572e6139f8632f 100644
 static void
 assert_device_not_suspended(struct drm_i915_private *dev_priv)
 {
-       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-            "Device suspended\n");
+       WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+                 "Device suspended\n");
 }
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
-       u32 gt_thread_status_mask;
-
-       if (IS_HASWELL(dev_priv->dev))
-               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
-       else
-               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
-       if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+       if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+                               GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
 }
 
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
        /* WaRsForcewakeWaitTC0:ivb,hsw */
-       if (INTEL_INFO(dev_priv->dev)->gen < 8)
-               __gen6_gt_wait_for_thread_c0(dev_priv);
+       __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for media to ack.\n");
        }
-
-       /* WaRsForcewakeWaitTC0:vlv */
-       if (!IS_CHERRYVIEW(dev_priv->dev))
-               __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+                       _MASKED_BIT_DISABLE(0xffff));
+
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+                       _MASKED_BIT_DISABLE(0xffff));
+
+       __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+                       _MASKED_BIT_DISABLE(0xffff));
+}
+
+static void
+__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_RENDER_GEN9) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Render forcewake old ack to clear.\n");
+
+               __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_RENDER_GEN9) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Render to ack.\n");
+       }
+
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_GEN9) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Media forcewake old ack to clear.\n");
+
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_GEN9) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Media to ack.\n");
+       }
+
+       /* Check for Blitter Engine */
+       if (FORCEWAKE_BLITTER & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_BLITTER_GEN9) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Blitter forcewake old ack to clear.\n");
+
+               __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_BLITTER_GEN9) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
+       }
+}
+
+static void
+__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+                               _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+                               _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+       /* Check for Blitter Engine */
+       if (FORCEWAKE_BLITTER & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+                               _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+}
+
+static void
+gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (dev_priv->uncore.fw_rendercount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (dev_priv->uncore.fw_mediacount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+
+       if (FORCEWAKE_BLITTER & fw_engine) {
+               if (dev_priv->uncore.fw_blittercount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_BLITTER);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       if (FORCEWAKE_RENDER & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+               if (--dev_priv->uncore.fw_rendercount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+               if (--dev_priv->uncore.fw_mediacount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+
+       if (FORCEWAKE_BLITTER & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_blittercount == 0);
+               if (--dev_priv->uncore.fw_blittercount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_BLITTER);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
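
A minimal sketch of the reference counting the two helpers above implement (hypothetical call sequence; only the 0 <-> 1 transitions reach the FORCEWAKE_* registers via the get/put vfuncs):

	gen9_force_wake_get(dev_priv, FORCEWAKE_RENDER);  /* 0 -> 1: HW wake    */
	gen9_force_wake_get(dev_priv, FORCEWAKE_RENDER);  /* 1 -> 2: count only */
	gen9_force_wake_put(dev_priv, FORCEWAKE_RENDER);  /* 2 -> 1: count only */
	gen9_force_wake_put(dev_priv, FORCEWAKE_RENDER);  /* 1 -> 0: HW sleep   */
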
 static void gen6_force_wake_timer(unsigned long arg)
 {
        struct drm_i915_private *dev_priv = (void *)arg;
@@ -337,6 +474,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
                __gen7_gt_force_wake_mt_reset(dev_priv);
 
+       if (IS_GEN9(dev))
+               __gen9_gt_force_wake_mt_reset(dev_priv);
+
        if (restore) { /* If reset with a user forcewake, try to restore */
                unsigned fw = 0;
 
@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 
                        if (dev_priv->uncore.fw_mediacount)
                                fw |= FORCEWAKE_MEDIA;
+               } else if (IS_GEN9(dev)) {
+                       if (dev_priv->uncore.fw_rendercount)
+                               fw |= FORCEWAKE_RENDER;
+
+                       if (dev_priv->uncore.fw_mediacount)
+                               fw |= FORCEWAKE_MEDIA;
+
+                       if (dev_priv->uncore.fw_blittercount)
+                               fw |= FORCEWAKE_BLITTER;
                } else {
                        if (dev_priv->uncore.forcewake_count)
                                fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+                                         bool restore_forcewake)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
        intel_uncore_forcewake_reset(dev, restore_forcewake);
 }
 
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+       __intel_uncore_early_sanitize(dev, restore_forcewake);
+       i915_check_and_clear_faults(dev);
+}
+
 void intel_uncore_sanitize(struct drm_device *dev)
 {
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 
        intel_runtime_pm_get(dev_priv);
 
+       /* Redirect to Gen9 specific routine */
+       if (IS_GEN9(dev_priv->dev))
+               return gen9_force_wake_get(dev_priv, fw_engine);
+
        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
 
+       /* Redirect to Gen9 specific routine */
+       if (IS_GEN9(dev_priv->dev)) {
+               gen9_force_wake_put(dev_priv, fw_engine);
+               goto out;
+       }
+
        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
         REG_RANGE((reg), 0x14000, 0x14400) || \
         REG_RANGE((reg), 0x22000, 0x24000))
 
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+       REG_RANGE((reg), 0xB00,  0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x2000, 0x2700) || \
+        REG_RANGE((reg), 0x3000, 0x4000) || \
+        REG_RANGE((reg), 0x5200, 0x8000) || \
+        REG_RANGE((reg), 0x8140, 0x8160) || \
+        REG_RANGE((reg), 0x8300, 0x8500) || \
+        REG_RANGE((reg), 0x8C00, 0x8D00) || \
+        REG_RANGE((reg), 0xB000, 0xB480) || \
+        REG_RANGE((reg), 0xE000, 0xE900) || \
+        REG_RANGE((reg), 0x24400, 0x24800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+       (REG_RANGE((reg), 0x8130, 0x8140) || \
+        REG_RANGE((reg), 0x8800, 0x8A00) || \
+        REG_RANGE((reg), 0xD000, 0xD800) || \
+        REG_RANGE((reg), 0x12000, 0x14000) || \
+        REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+        REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+       REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+       ((reg) < 0x40000 && \
+        !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+        !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+        !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+        !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
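
A hedged illustration of how a few offsets fall out of these range checks, together with SKL_NEEDS_FORCE_WAKE() defined further down (offsets chosen for the example):

	/* Example classifications:
	 *   0x02030 (render RING_TAIL)  -> RENDER range (0x2000-0x2700)
	 *   0x12030 (BSD RING_TAIL)     -> MEDIA range  (0x12000-0x14000)
	 *   0x09500                     -> COMMON range -> wake RENDER + MEDIA
	 *   0x22030 (blitter RING_TAIL) -> none of the above, < 0x40000 -> BLITTER
	 *   0x70000 (display)           -> >= 0x40000, no forcewake needed
	 */
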
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_FOOTER; \
 }
 
+#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg)    \
+        ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       REG_READ_HEADER(x); \
+       if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               val = __raw_i915_read##x(dev_priv, reg); \
+       } else { \
+               unsigned fwengine = 0; \
+               if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine = FORCEWAKE_RENDER; \
+               } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine = FORCEWAKE_MEDIA; \
+               } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine |= FORCEWAKE_RENDER; \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine |= FORCEWAKE_MEDIA; \
+               } else { \
+                       if (dev_priv->uncore.fw_blittercount == 0) \
+                               fwengine = FORCEWAKE_BLITTER; \
+               } \
+               if (fwengine) \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+               val = __raw_i915_read##x(dev_priv, reg); \
+               if (fwengine) \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+       } \
+       REG_READ_FOOTER; \
+}
+
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
 __chv_read(8)
 __chv_read(16)
 __chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __gen9_read
 #undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
        REG_WRITE_FOOTER; \
 }
 
+static const u32 gen9_shadowed_regs[] = {
+       RING_TAIL(RENDER_RING_BASE),
+       RING_TAIL(GEN6_BSD_RING_BASE),
+       RING_TAIL(VEBOX_RING_BASE),
+       RING_TAIL(BLT_RING_BASE),
+       FORCEWAKE_BLITTER_GEN9,
+       FORCEWAKE_RENDER_GEN9,
+       FORCEWAKE_MEDIA_GEN9,
+       GEN6_RPNSWREQ,
+       GEN6_RC_VIDEO_FREQ,
+       /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+       int i;
+       for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+               if (reg == gen9_shadowed_regs[i])
+                       return true;
+
+       return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+               bool trace) { \
+       REG_WRITE_HEADER; \
+       if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+                       is_gen9_shadowed(dev_priv, reg)) { \
+               __raw_i915_write##x(dev_priv, reg, val); \
+       } else { \
+               unsigned fwengine = 0; \
+               if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine = FORCEWAKE_RENDER; \
+               } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine = FORCEWAKE_MEDIA; \
+               } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+                       if (dev_priv->uncore.fw_rendercount == 0) \
+                               fwengine |= FORCEWAKE_RENDER; \
+                       if (dev_priv->uncore.fw_mediacount == 0) \
+                               fwengine |= FORCEWAKE_MEDIA; \
+               } else { \
+                       if (dev_priv->uncore.fw_blittercount == 0) \
+                               fwengine = FORCEWAKE_BLITTER; \
+               } \
+               if (fwengine) \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                       fwengine); \
+               __raw_i915_write##x(dev_priv, reg, val); \
+               if (fwengine) \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                       fwengine); \
+       } \
+       REG_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
 __gen4_write(32)
 __gen4_write(64)
 
+#undef __gen9_write
 #undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
 #undef REG_WRITE_FOOTER
 #undef REG_WRITE_HEADER
 
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+       dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+       dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+       dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+       dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+       dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+       dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+       dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+       dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
+
 void intel_uncore_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,9 +1151,12 @@ void intel_uncore_init(struct drm_device *dev)
        setup_timer(&dev_priv->uncore.force_wake_timer,
                    gen6_force_wake_timer, (unsigned long)dev_priv);
 
-       intel_uncore_early_sanitize(dev, false);
+       __intel_uncore_early_sanitize(dev, false);
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_GEN9(dev)) {
+               dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+       } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)
 
        switch (INTEL_INFO(dev)->gen) {
        default:
+               WARN_ON(1);
+               return;
+       case 9:
+               ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+               ASSIGN_READ_MMIO_VFUNCS(gen9);
+               break;
+       case 8:
                if (IS_CHERRYVIEW(dev)) {
-                       dev_priv->uncore.funcs.mmio_writeb  = chv_write8;
-                       dev_priv->uncore.funcs.mmio_writew  = chv_write16;
-                       dev_priv->uncore.funcs.mmio_writel  = chv_write32;
-                       dev_priv->uncore.funcs.mmio_writeq  = chv_write64;
-                       dev_priv->uncore.funcs.mmio_readb  = chv_read8;
-                       dev_priv->uncore.funcs.mmio_readw  = chv_read16;
-                       dev_priv->uncore.funcs.mmio_readl  = chv_read32;
-                       dev_priv->uncore.funcs.mmio_readq  = chv_read64;
+                       ASSIGN_WRITE_MMIO_VFUNCS(chv);
+                       ASSIGN_READ_MMIO_VFUNCS(chv);
 
                } else {
-                       dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
-                       dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
-                       dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
-                       dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
-                       dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-                       dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-                       dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-                       dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+                       ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+                       ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
-                       dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
-                       dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
-                       dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
-                       dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
+                       ASSIGN_WRITE_MMIO_VFUNCS(hsw);
                } else {
-                       dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
-                       dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
-                       dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
-                       dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
+                       ASSIGN_WRITE_MMIO_VFUNCS(gen6);
                }
 
                if (IS_VALLEYVIEW(dev)) {
-                       dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
-                       dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
-                       dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
-                       dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
+                       ASSIGN_READ_MMIO_VFUNCS(vlv);
                } else {
-                       dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-                       dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-                       dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-                       dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+                       ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
-               dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
-               dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
-               dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
-               dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
-               dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
-               dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
-               dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
-               dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
+               ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+               ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
-               dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
-               dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
-               dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
-               dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
-               dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
-               dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
-               dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
-               dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
+               ASSIGN_WRITE_MMIO_VFUNCS(gen4);
+               ASSIGN_READ_MMIO_VFUNCS(gen4);
                break;
        }
+
+       i915_check_and_clear_faults(dev);
 }
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
 
 void intel_uncore_fini(struct drm_device *dev)
 {
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
 } whitelist[] = {
-       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
 };
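The gen_bitmask comment above encodes one bit per supported hardware generation, and this change extends the render-ring timestamp entry through gen9. A sketch of the range-to-bitmask idea (an assumed shape, not necessarily the kernel's exact GEN_RANGE macro):

    #include <stdio.h>

    /* Set bits lo..hi inclusive, one bit per generation. */
    #define GEN_MASK_RANGE(lo, hi) (((1u << ((hi) - (lo) + 1)) - 1) << (lo))

    int main(void)
    {
            /* matches the comment: 0x10 for gen4, 0x30 for gens 4 and 5 */
            printf("0x%x 0x%x 0x%x\n",
                   GEN_MASK_RANGE(4, 4),    /* 0x10  */
                   GEN_MASK_RANGE(4, 5),    /* 0x30  */
                   GEN_MASK_RANGE(4, 9));   /* 0x3f0 */
            return 0;
    }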
 
 int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1053,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
        return 0;
 }
 
-static int i965_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct drm_device *dev)
 {
        u8 gdrst;
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-       return (gdrst & GRDOM_RESET_ENABLE) == 0;
+       pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+       return (gdrst & GRDOM_RESET_STATUS) == 0;
 }
 
-static int i965_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev)
 {
-       int ret;
-
-       /* FIXME: i965g/gm need a display save/restore for gpu reset. */
-       return -ENODEV;
+       /* assert reset for at least 20 usec */
+       pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       udelay(20);
+       pci_write_config_byte(dev->pdev, I915_GDRST, 0);
 
-       /*
-        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
-        * well as the reset bit (GR/bit 0).  Setting the GR bit
-        * triggers the reset; when done, the hardware will clear it.
-        */
-       pci_write_config_byte(dev->pdev, I965_GDRST,
-                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
-       ret =  wait_for(i965_reset_complete(dev), 500);
-       if (ret)
-               return ret;
-
-       pci_write_config_byte(dev->pdev, I965_GDRST,
-                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-
-       ret =  wait_for(i965_reset_complete(dev), 500);
-       if (ret)
-               return ret;
+       return wait_for(i915_reset_complete(dev), 500);
+}
 
-       pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+static int g4x_reset_complete(struct drm_device *dev)
+{
+       u8 gdrst;
+       pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+       return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
 
-       return 0;
+static int g33_do_reset(struct drm_device *dev)
+{
+       pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       return wait_for(g4x_reset_complete(dev), 500);
 }
 
 static int g4x_do_reset(struct drm_device *dev)
@@ -1095,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       pci_write_config_byte(dev->pdev, I965_GDRST,
+       pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
-       ret =  wait_for(i965_reset_complete(dev), 500);
+       ret =  wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;
 
@@ -1105,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);
 
-       pci_write_config_byte(dev->pdev, I965_GDRST,
+       pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-       ret =  wait_for(i965_reset_complete(dev), 500);
+       ret =  wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;
 
@@ -1115,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);
 
-       pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+       pci_write_config_byte(dev->pdev, I915_GDRST, 0);
 
        return 0;
 }
@@ -1173,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
                return ironlake_do_reset(dev);
        else if (IS_G4X(dev))
                return g4x_do_reset(dev);
-       else if (IS_GEN4(dev))
-               return i965_do_reset(dev);
+       else if (IS_G33(dev))
+               return g33_do_reset(dev);
+       else if (INTEL_INFO(dev)->gen >= 3)
+               return i915_do_reset(dev);
        else
                return -ENODEV;
 }
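The reworked reset helpers all share one shape: assert the reset bit through PCI config space (holding it at least 20 usec in i915_do_reset()), deassert where needed, then poll a status bit with wait_for() until the hardware reports completion or a 500 ms budget lapses. A toy, self-contained sketch of that poll-with-timeout pattern, with the hardware faked as a countdown:

    #include <stdio.h>
    #include <stdbool.h>

    /* Fake hardware: pretend the reset status bit clears after 3 polls. */
    static int polls_left = 3;
    static bool reset_done(void) { return --polls_left <= 0; }

    /* Same shape as the kernel's wait_for(COND, MS): poll until the
     * condition holds or the millisecond budget runs out. */
    static int wait_for_done(int timeout_ms)
    {
            while (timeout_ms-- > 0) {
                    if (reset_done())
                            return 0;   /* status bit cleared by hardware */
                    /* the kernel sleeps ~1 ms between polls here */
            }
            return -1;                  /* timed out (-ETIMEDOUT) */
    }

    int main(void)
    {
            printf("reset %s\n",
                   wait_for_done(500) == 0 ? "complete" : "timed out");
            return 0;
    }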
similarity index 99%
rename from drivers/staging/imx-drm/imx-drm-core.c
rename to drivers/gpu/drm/imx/imx-drm-core.c
index 9cb222e2996f5e5391f7832ce9649bff580fd0ce..2f80072417349e1588cf6fcf2631bb91716a500a 100644
@@ -24,6 +24,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "imx-drm.h"
 
index 83485ab81ce8714214c258b537fd1609e12bae2a..9872ba9abf1a28e4bdf3625e596c41a862fec291 100644
@@ -15,6 +15,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "mgag200_drv.h"
 
index 9d907c526c94d4df4d9a70a527678c2d58a743ff..5b2a1ff95d3de2ab1341dfa3fe6df3e0986e747e 100644
@@ -3,6 +3,7 @@ config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+       select REGULATOR
        select DRM_KMS_HELPER
        select DRM_PANEL
        select SHMEM
index 6283dcb96af51f8bd5e70c26a507412f216996f6..143d988f8addc7471d7962149112f45ec091938f 100644
@@ -7,6 +7,7 @@ msm-y := \
        adreno/adreno_device.o \
        adreno/adreno_gpu.o \
        adreno/a3xx_gpu.o \
+       adreno/a4xx_gpu.o \
        hdmi/hdmi.o \
        hdmi/hdmi_audio.o \
        hdmi/hdmi_bridge.o \
@@ -24,12 +25,15 @@ msm-y := \
        mdp/mdp4/mdp4_irq.o \
        mdp/mdp4/mdp4_kms.o \
        mdp/mdp4/mdp4_plane.o \
+       mdp/mdp5/mdp5_cfg.o \
+       mdp/mdp5/mdp5_ctl.o \
        mdp/mdp5/mdp5_crtc.o \
        mdp/mdp5/mdp5_encoder.o \
        mdp/mdp5/mdp5_irq.o \
        mdp/mdp5/mdp5_kms.o \
        mdp/mdp5/mdp5_plane.o \
        mdp/mdp5/mdp5_smp.o \
+       msm_atomic.o \
        msm_drv.o \
        msm_fb.o \
        msm_gem.o \
index a3104598c27f8117b5917ab2bdf7c018ad514383..22882cc0a5731c396e7a27075d42116e04193a8a 100644
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A2XX_VGT_DRAW_INITIATOR_NOT_EOP                                0x00001000
 #define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX                    0x00002000
 #define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE      0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK              0xffff0000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT             16
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK            0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT           24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-       return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+       return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A2XX_VGT_IMMED_DATA                                        0x000021fd
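Nearly every field helper in these autogenerated headers follows the same ((val) << SHIFT) & MASK packing idiom. A self-contained sketch of the pack and the matching unpack, borrowing the new NUM_INSTANCES layout (8-bit field in bits 31:24) from the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_INSTANCES__MASK  0xff000000u
    #define NUM_INSTANCES__SHIFT 24

    /* Shift the value into place, then mask off anything that would
     * overflow the field. */
    static inline uint32_t pack_num_instances(uint32_t val)
    {
            return (val << NUM_INSTANCES__SHIFT) & NUM_INSTANCES__MASK;
    }

    static inline uint32_t unpack_num_instances(uint32_t reg)
    {
            return (reg & NUM_INSTANCES__MASK) >> NUM_INSTANCES__SHIFT;
    }

    int main(void)
    {
            uint32_t reg = pack_num_instances(5);
            printf("reg=0x%08x instances=%u\n", reg, unpack_num_instances(reg));
            return 0;
    }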
@@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
 #define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT                    0
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+       return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
 }
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK                      0xffff0000
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT                     16
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+       return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SU_POINT_MINMAX                            0x00002281
@@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 #define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT                     0
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+       return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A2XX_PA_SU_POINT_MINMAX_MAX__MASK                      0xffff0000
 #define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT                     16
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+       return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A2XX_PA_SU_LINE_CNTL                               0x00002282
@@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 #define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT                      0
 static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+       return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SC_LINE_STIPPLE                            0x00002283
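The updated conversions scale by 16.0 instead of 8.0, i.e. these point/line-size fields gain a fourth fractional bit. Reading this as unsigned fixed point with 4 fractional bits (an interpretation from the scale factor; the header does not name the format), a sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Multiplying by 16 (2^4) and truncating yields fixed point with
     * 4 fractional bits, e.g. 2.5 pixels -> 0x28. */
    static inline uint32_t to_fixed_4frac(float val)
    {
            return (uint32_t)(val * 16.0f);
    }

    int main(void)
    {
            printf("2.5 -> 0x%02x\n", to_fixed_4frac(2.5f)); /* 0x28 */
            return 0;
    }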
index 82d015279b47bf493fc0b5773c2306387d17c7ce..109e9a263daf6ac8e6d352e0791635e3a76e9ffa 100644
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
        VFMT_NORM_USHORT_16_16 = 29,
        VFMT_NORM_USHORT_16_16_16 = 30,
        VFMT_NORM_USHORT_16_16_16_16 = 31,
+       VFMT_UINT_32 = 32,
+       VFMT_UINT_32_32 = 33,
+       VFMT_UINT_32_32_32 = 34,
+       VFMT_UINT_32_32_32_32 = 35,
+       VFMT_INT_32 = 36,
+       VFMT_INT_32_32 = 37,
+       VFMT_INT_32_32_32 = 38,
+       VFMT_INT_32_32_32_32 = 39,
        VFMT_UBYTE_8 = 40,
        VFMT_UBYTE_8_8 = 41,
        VFMT_UBYTE_8_8_8 = 42,
@@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
        TFMT_NORM_USHORT_565 = 4,
        TFMT_NORM_USHORT_5551 = 6,
        TFMT_NORM_USHORT_4444 = 7,
+       TFMT_NORM_USHORT_Z16 = 9,
        TFMT_NORM_UINT_X8Z24 = 10,
+       TFMT_FLOAT_Z32 = 11,
        TFMT_NORM_UINT_NV12_UV_TILED = 17,
        TFMT_NORM_UINT_NV12_Y_TILED = 19,
        TFMT_NORM_UINT_NV12_UV = 21,
@@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
        TFMT_NORM_UINT_I420_U = 26,
        TFMT_NORM_UINT_I420_V = 27,
        TFMT_NORM_UINT_2_10_10_10 = 41,
+       TFMT_FLOAT_9_9_9_E5 = 42,
+       TFMT_FLOAT_10_11_11 = 43,
        TFMT_NORM_UINT_A8 = 44,
        TFMT_NORM_UINT_L8_A8 = 47,
        TFMT_NORM_UINT_8 = 48,
        TFMT_NORM_UINT_8_8 = 49,
        TFMT_NORM_UINT_8_8_8 = 50,
        TFMT_NORM_UINT_8_8_8_8 = 51,
+       TFMT_NORM_SINT_8_8 = 53,
+       TFMT_NORM_SINT_8_8_8_8 = 55,
+       TFMT_UINT_8_8 = 57,
+       TFMT_UINT_8_8_8_8 = 59,
+       TFMT_SINT_8_8 = 61,
+       TFMT_SINT_8_8_8_8 = 63,
        TFMT_FLOAT_16 = 64,
        TFMT_FLOAT_16_16 = 65,
        TFMT_FLOAT_16_16_16_16 = 67,
+       TFMT_UINT_16 = 68,
+       TFMT_UINT_16_16 = 69,
+       TFMT_UINT_16_16_16_16 = 71,
+       TFMT_SINT_16 = 72,
+       TFMT_SINT_16_16 = 73,
+       TFMT_SINT_16_16_16_16 = 75,
        TFMT_FLOAT_32 = 84,
        TFMT_FLOAT_32_32 = 85,
        TFMT_FLOAT_32_32_32_32 = 87,
+       TFMT_UINT_32 = 88,
+       TFMT_UINT_32_32 = 89,
+       TFMT_UINT_32_32_32_32 = 91,
+       TFMT_SINT_32 = 92,
+       TFMT_SINT_32_32 = 93,
+       TFMT_SINT_32_32_32_32 = 95,
 };
 
 enum a3xx_tex_fetchsize {
@@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
 };
 
 enum a3xx_color_fmt {
+       RB_R5G6B5_UNORM = 0,
+       RB_R5G5B5A1_UNORM = 1,
+       RB_R4G4B4A4_UNORM = 3,
        RB_R8G8B8_UNORM = 4,
        RB_R8G8B8A8_UNORM = 8,
-       RB_Z16_UNORM = 12,
+       RB_R8G8B8A8_UINT = 10,
+       RB_R8G8B8A8_SINT = 11,
+       RB_R8G8_UNORM = 12,
+       RB_R8_UINT = 14,
+       RB_R8_SINT = 15,
+       RB_R10G10B10A2_UNORM = 16,
        RB_A8_UNORM = 20,
+       RB_R8_UNORM = 21,
        RB_R16G16B16A16_FLOAT = 27,
+       RB_R11G11B10_FLOAT = 28,
+       RB_R16_SINT = 40,
+       RB_R16G16_SINT = 41,
+       RB_R16G16B16A16_SINT = 43,
+       RB_R16_UINT = 44,
+       RB_R16G16_UINT = 45,
+       RB_R16G16B16A16_UINT = 47,
        RB_R32G32B32A32_FLOAT = 51,
-};
-
-enum a3xx_color_swap {
-       WZYX = 0,
-       WXYZ = 1,
-       ZYXW = 2,
-       XYZW = 3,
+       RB_R32_SINT = 52,
+       RB_R32G32_SINT = 53,
+       RB_R32G32B32A32_SINT = 55,
+       RB_R32_UINT = 56,
+       RB_R32G32_UINT = 57,
+       RB_R32G32B32A32_UINT = 59,
 };
 
 enum a3xx_sp_perfcounter_select {
@@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
        BLEND_MAX_DST_SRC = 4,
 };
 
+enum a3xx_intp_mode {
+       SMOOTH = 0,
+       FLAT = 1,
+};
+
 enum a3xx_tex_filter {
        A3XX_TEX_NEAREST = 0,
        A3XX_TEX_LINEAR = 1,
@@ -536,6 +586,10 @@ enum a3xx_tex_type {
 
 #define REG_A3XX_CP_MEQ_DATA                                   0x000001db
 
+#define REG_A3XX_CP_WFI_PEND_CTR                               0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2                             0x0000039d
+
 #define REG_A3XX_CP_PERFCOUNTER_SELECT                         0x00000445
 
 #define REG_A3XX_CP_HW_FAULT                                   0x0000045c
@@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
 
 #define REG_A3XX_CP_AHB_FAULT                                  0x0000054d
 
+#define REG_A3XX_SQ_GPR_MANAGEMENT                             0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT                       0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN                                   0x00000e1e
+
 #define REG_A3XX_SP_GLOBAL_MEM_SIZE                            0x00000e22
 
 #define REG_A3XX_SP_GLOBAL_MEM_ADDR                            0x00000e23
@@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
 #define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT                   0
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+       return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK                    0xffff0000
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT                   16
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+       return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POINT_SIZE                            0x00002069
@@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 #define A3XX_GRAS_SU_POINT_SIZE__SHIFT                         0
 static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 {
-       return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+       return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE                     0x0000206c
@@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT              0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 {
-       return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+       return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET                    0x0000206d
@@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT                 0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 {
-       return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+       return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_MODE_CONTROL                          0x00002070
@@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 #define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT         3
 static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
 {
-       return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+       return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
 }
 #define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET                  0x00000800
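Several helpers in this hunk also switch the intermediate cast from uint32_t to int32_t. A sketch of why that matters for fields that may hold negative values (field layout borrowed from LINEHALFWIDTH above): converting a negative float directly to an unsigned type is undefined behaviour in C, whereas going through int32_t keeps the two's-complement bit pattern.

    #include <stdio.h>
    #include <stdint.h>

    #define LINEHALFWIDTH__MASK  0x00000078u
    #define LINEHALFWIDTH__SHIFT 3

    /* float -> int32_t preserves the sign; the uint32_t cast then
     * reinterprets the two's-complement bits before shift and mask. */
    static inline uint32_t pack_halfwidth(float val)
    {
            return (((uint32_t)(int32_t)(val * 4.0f)) << LINEHALFWIDTH__SHIFT)
                    & LINEHALFWIDTH__MASK;
    }

    int main(void)
    {
            printf("-0.5 -> 0x%08x\n", pack_halfwidth(-0.5f)); /* 0x00000070 */
            return 0;
    }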
 
@@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
 {
        return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB                                0x00004000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK             0xfffe0000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT            17
 static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
        return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_UNK12                             0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK                   0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT                  14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 #define REG_A3XX_RB_DEPTH_CLEAR                                        0x00002101
 
 #define REG_A3XX_RB_DEPTH_INFO                                 0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK                  0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK                  0x00000003
 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT                 0
 static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
 {
@@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
 {
        return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
 }
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART                        0x00100000
 #define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST               0x02000000
 #define A3XX_PC_PRIM_VTX_CNTL_PSIZE                            0x04000000
 
@@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART                        0x00000200
 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2                      0x00000400
 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE                   0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE                        0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK                        0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT               27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+       return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
 #define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE              0x10000000
 #define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE              0x20000000
 #define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE                   0x40000000
@@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
 
 #define REG_A3XX_VFD_INDEX_OFFSET                              0x00002245
 
+#define REG_A3XX_VFD_INDEX_OFFSET                              0x00002245
+
 static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
 
 static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
 {
        return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
 }
+#define A3XX_VFD_DECODE_INSTR_INT                              0x00100000
 #define A3XX_VFD_DECODE_INSTR_SWAP__MASK                       0x00c00000
 #define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT                      22
 static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK                  0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT                 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK                  0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT                 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK                  0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT                 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK                  0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT                 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK                  0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT                 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK                  0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT                 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK                  0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT                 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK                  0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT                 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK                  0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT                 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK                  0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT                 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK                  0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT                 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK                  0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT                 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK                  0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT                 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK                  0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT                 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK                  0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT                 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK                  0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT                 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+       return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
 
 static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
 
@@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
        return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
 }
 #define A3XX_SP_FS_MRT_REG_HALF_PRECISION                      0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT                                        0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT                                        0x00000800
 
 static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
 
@@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
        return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
 }
 
+#define REG_A3XX_PA_SC_AA_CONFIG                               0x00002301
+
 #define REG_A3XX_TPL1_TP_VS_TEX_OFFSET                         0x00002340
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK         0x000000ff
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT                0
@@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A3XX_VGT_DRAW_INITIATOR_NOT_EOP                                0x00001000
 #define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX                    0x00002000
 #define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE      0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK              0xffff0000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT             16
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK            0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT           24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-       return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+       return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A3XX_VGT_IMMED_DATA                                        0x000021fd
@@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS                          0x80000000
 
 #define REG_A3XX_TEX_SAMP_1                                    0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK                         0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT                                0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+       return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
 #define A3XX_TEX_SAMP_1_MAX_LOD__MASK                          0x003ff000
 #define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT                         12
 static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
 {
-       return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+       return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
 }
 #define A3XX_TEX_SAMP_1_MIN_LOD__MASK                          0xffc00000
 #define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT                         22
 static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 {
-       return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+       return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
 }
 
 #define REG_A3XX_TEX_CONST_0                                   0x00000000
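The new LOD_BIAS helper packs a signed value scaled by 64.0 into an 11-bit field, which reads as signed fixed point with 6 fractional bits (again an interpretation from the scale and mask, not stated in the header). A sketch of how negative biases wrap into the masked field:

    #include <stdio.h>
    #include <stdint.h>

    #define LOD_BIAS__MASK 0x000007ffu      /* 11-bit field, as above */

    /* 6 fractional bits (64 = 2^6); masking the low 11 bits lets
     * negative biases land as two's complement. */
    static inline uint32_t pack_lod_bias(float val)
    {
            return ((uint32_t)(int32_t)(val * 64.0f)) & LOD_BIAS__MASK;
    }

    int main(void)
    {
            printf("+1.5 -> 0x%03x\n", pack_lod_bias(1.5f));  /* 0x060 */
            printf("-1.0 -> 0x%03x\n", pack_lod_bias(-1.0f)); /* 0x7c0 */
            return 0;
    }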
@@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
 }
 
 #define REG_A3XX_TEX_CONST_3                                   0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK                                0x0000000f
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT                       0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+       return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK                           0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT                          17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+       return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK                                0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT                       28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+       return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
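The val >> 12 in the new LAYERSZ helpers suggests layer sizes are stored in 4 KiB granules (an inference from the shift amount, not documented in the header). A sketch of the round-trip under that assumption:

    #include <stdio.h>
    #include <stdint.h>

    /* Only page-aligned (4 KiB) sizes are representable: the pack drops
     * the low 12 bits, the unpack restores the granularity. */
    static inline uint32_t bytes_to_layersz(uint32_t bytes) { return bytes >> 12; }
    static inline uint32_t layersz_to_bytes(uint32_t sz)    { return sz << 12; }

    int main(void)
    {
            uint32_t sz = bytes_to_layersz(65536);  /* 64 KiB -> 16 granules */
            printf("granules=%u bytes=%u\n", sz, layersz_to_bytes(sz));
            return 0;
    }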
 
 
 #endif /* A3XX_XML */
index 218c5b0603989b3474ef7815b91d6ec0371ab013..b66c53bdc039cafffd3be36a4fe39ccfb92fb470 100644
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
                        gpu_read(gpu, REG_A3XX_RBBM_STATUS));
        adreno_dump(gpu);
 }
+/* Register offset defines for A3XX */
+static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+                       REG_A3XX_CP_PFP_UCODE_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+                       REG_A3XX_CP_PFP_UCODE_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+                       REG_A3XX_CP_PROTECT_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+                       REG_A3XX_RBBM_PERFCTR_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+                       REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+                       REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+                       REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+                       REG_A3XX_RBBM_INT_0_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+                       REG_A3XX_RBBM_AHB_ERROR_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+                       REG_A3XX_RBBM_INT_CLEAR_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+                       REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
+       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+                       REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
+       REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+                       REG_A3XX_VSC_SIZE_ADDRESS),
+       REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
+       REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+                       REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+                       REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+                       REG_A3XX_SP_VS_OBJ_START_REG),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+                       REG_A3XX_SP_FS_OBJ_START_REG),
+       REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
+                       REG_A3XX_RBBM_PM_OVERRIDE2),
+       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
+       REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
+                       REG_A3XX_SQ_GPR_MANAGEMENT),
+       REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+                       REG_A3XX_SQ_INST_STORE_MANAGMENT),
+       REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+                       REG_A3XX_RBBM_SW_RESET_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+                       REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+                       REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+                       REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
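This offset table lets generation-independent adreno code name a register abstractly while each GPU family supplies its concrete offset. A toy version of the same indirection (enum entries and offsets made up for illustration, not the real REG_ADRENO_* values):

    #include <stdio.h>

    enum { REG_CP_RB_BASE, REG_RBBM_STATUS, REG_MAX };

    /* Per-family table: common code indexes by abstract name. */
    static const unsigned a3xx_offsets[REG_MAX] = {
            [REG_CP_RB_BASE]  = 0x01c0,   /* made-up offsets */
            [REG_RBBM_STATUS] = 0x0030,
    };

    static unsigned gpu_reg(const unsigned *offsets, int reg)
    {
            return offsets[reg];
    }

    int main(void)
    {
            printf("RBBM_STATUS at 0x%04x\n",
                   gpu_reg(a3xx_offsets, REG_RBBM_STATUS));
            return 0;
    }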
 
 static const struct adreno_gpu_funcs funcs = {
        .base = {
@@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
        adreno_gpu->registers = a3xx_registers;
+       adreno_gpu->reg_offsets = a3xx_register_offsets;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
        if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 0000000..5a24c41
--- /dev/null
@@ -0,0 +1,2144 @@
+#ifndef A4XX_XML
+#define A4XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  49097 bytes, from 2014-11-14 15:38:00)
+
+Copyright (C) 2013-2014 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a4xx_color_fmt {
+       RB4_A8_UNORM = 1,
+       RB4_R5G6R5_UNORM = 14,
+       RB4_Z16_UNORM = 15,
+       RB4_R8G8B8_UNORM = 25,
+       RB4_R8G8B8A8_UNORM = 26,
+};
+
+enum a4xx_tile_mode {
+       TILE4_LINEAR = 0,
+       TILE4_3 = 3,
+};
+
+enum a4xx_rb_blend_opcode {
+       BLEND_DST_PLUS_SRC = 0,
+       BLEND_SRC_MINUS_DST = 1,
+       BLEND_DST_MINUS_SRC = 2,
+       BLEND_MIN_DST_SRC = 3,
+       BLEND_MAX_DST_SRC = 4,
+};
+
+enum a4xx_vtx_fmt {
+       VFMT4_FLOAT_32 = 1,
+       VFMT4_FLOAT_32_32 = 2,
+       VFMT4_FLOAT_32_32_32 = 3,
+       VFMT4_FLOAT_32_32_32_32 = 4,
+       VFMT4_FLOAT_16 = 5,
+       VFMT4_FLOAT_16_16 = 6,
+       VFMT4_FLOAT_16_16_16 = 7,
+       VFMT4_FLOAT_16_16_16_16 = 8,
+       VFMT4_FIXED_32 = 9,
+       VFMT4_FIXED_32_32 = 10,
+       VFMT4_FIXED_32_32_32 = 11,
+       VFMT4_FIXED_32_32_32_32 = 12,
+       VFMT4_SHORT_16 = 16,
+       VFMT4_SHORT_16_16 = 17,
+       VFMT4_SHORT_16_16_16 = 18,
+       VFMT4_SHORT_16_16_16_16 = 19,
+       VFMT4_USHORT_16 = 20,
+       VFMT4_USHORT_16_16 = 21,
+       VFMT4_USHORT_16_16_16 = 22,
+       VFMT4_USHORT_16_16_16_16 = 23,
+       VFMT4_NORM_SHORT_16 = 24,
+       VFMT4_NORM_SHORT_16_16 = 25,
+       VFMT4_NORM_SHORT_16_16_16 = 26,
+       VFMT4_NORM_SHORT_16_16_16_16 = 27,
+       VFMT4_NORM_USHORT_16 = 28,
+       VFMT4_NORM_USHORT_16_16 = 29,
+       VFMT4_NORM_USHORT_16_16_16 = 30,
+       VFMT4_NORM_USHORT_16_16_16_16 = 31,
+       VFMT4_UBYTE_8 = 40,
+       VFMT4_UBYTE_8_8 = 41,
+       VFMT4_UBYTE_8_8_8 = 42,
+       VFMT4_UBYTE_8_8_8_8 = 43,
+       VFMT4_NORM_UBYTE_8 = 44,
+       VFMT4_NORM_UBYTE_8_8 = 45,
+       VFMT4_NORM_UBYTE_8_8_8 = 46,
+       VFMT4_NORM_UBYTE_8_8_8_8 = 47,
+       VFMT4_BYTE_8 = 48,
+       VFMT4_BYTE_8_8 = 49,
+       VFMT4_BYTE_8_8_8 = 50,
+       VFMT4_BYTE_8_8_8_8 = 51,
+       VFMT4_NORM_BYTE_8 = 52,
+       VFMT4_NORM_BYTE_8_8 = 53,
+       VFMT4_NORM_BYTE_8_8_8 = 54,
+       VFMT4_NORM_BYTE_8_8_8_8 = 55,
+       VFMT4_UINT_10_10_10_2 = 60,
+       VFMT4_NORM_UINT_10_10_10_2 = 61,
+       VFMT4_INT_10_10_10_2 = 62,
+       VFMT4_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a4xx_tex_fmt {
+       TFMT4_NORM_USHORT_565 = 11,
+       TFMT4_NORM_USHORT_5551 = 10,
+       TFMT4_NORM_USHORT_4444 = 8,
+       TFMT4_NORM_UINT_X8Z24 = 71,
+       TFMT4_NORM_UINT_2_10_10_10 = 33,
+       TFMT4_NORM_UINT_A8 = 3,
+       TFMT4_NORM_UINT_L8_A8 = 13,
+       TFMT4_NORM_UINT_8 = 4,
+       TFMT4_NORM_UINT_8_8_8_8 = 28,
+       TFMT4_FLOAT_16 = 20,
+       TFMT4_FLOAT_16_16 = 40,
+       TFMT4_FLOAT_16_16_16_16 = 53,
+       TFMT4_FLOAT_32 = 43,
+       TFMT4_FLOAT_32_32 = 56,
+       TFMT4_FLOAT_32_32_32_32 = 63,
+};
+
+enum a4xx_depth_format {
+       DEPTH4_NONE = 0,
+       DEPTH4_16 = 1,
+       DEPTH4_24_8 = 2,
+};
+
+enum a4xx_tex_filter {
+       A4XX_TEX_NEAREST = 0,
+       A4XX_TEX_LINEAR = 1,
+};
+
+enum a4xx_tex_clamp {
+       A4XX_TEX_REPEAT = 0,
+       A4XX_TEX_CLAMP_TO_EDGE = 1,
+       A4XX_TEX_MIRROR_REPEAT = 2,
+       A4XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a4xx_tex_swiz {
+       A4XX_TEX_X = 0,
+       A4XX_TEX_Y = 1,
+       A4XX_TEX_Z = 2,
+       A4XX_TEX_W = 3,
+       A4XX_TEX_ZERO = 4,
+       A4XX_TEX_ONE = 5,
+};
+
+enum a4xx_tex_type {
+       A4XX_TEX_1D = 0,
+       A4XX_TEX_2D = 1,
+       A4XX_TEX_CUBE = 2,
+       A4XX_TEX_3D = 3,
+};
+
+#define A4XX_CGC_HLSQ_EARLY_CYC__MASK                          0x00700000
+#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT                         20
+static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
+{
+       return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+}
+#define A4XX_INT0_RBBM_GPU_IDLE                                        0x00000001
+#define A4XX_INT0_RBBM_AHB_ERROR                               0x00000002
+#define A4XX_INT0_RBBM_REG_TIMEOUT                             0x00000004
+#define A4XX_INT0_RBBM_ME_MS_TIMEOUT                           0x00000008
+#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT                          0x00000010
+#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW                                0x00000020
+#define A4XX_INT0_VFD_ERROR                                    0x00000040
+#define A4XX_INT0_CP_SW_INT                                    0x00000080
+#define A4XX_INT0_CP_T0_PACKET_IN_IB                           0x00000100
+#define A4XX_INT0_CP_OPCODE_ERROR                              0x00000200
+#define A4XX_INT0_CP_RESERVED_BIT_ERROR                                0x00000400
+#define A4XX_INT0_CP_HW_FAULT                                  0x00000800
+#define A4XX_INT0_CP_DMA                                       0x00001000
+#define A4XX_INT0_CP_IB2_INT                                   0x00002000
+#define A4XX_INT0_CP_IB1_INT                                   0x00004000
+#define A4XX_INT0_CP_RB_INT                                    0x00008000
+#define A4XX_INT0_CP_REG_PROTECT_FAULT                         0x00010000
+#define A4XX_INT0_CP_RB_DONE_TS                                        0x00020000
+#define A4XX_INT0_CP_VS_DONE_TS                                        0x00040000
+#define A4XX_INT0_CP_PS_DONE_TS                                        0x00080000
+#define A4XX_INT0_CACHE_FLUSH_TS                               0x00100000
+#define A4XX_INT0_CP_AHB_ERROR_HALT                            0x00200000
+#define A4XX_INT0_MISC_HANG_DETECT                             0x01000000
+#define A4XX_INT0_UCHE_OOB_ACCESS                              0x02000000
+#define REG_A4XX_RB_GMEM_BASE_ADDR                             0x00000cc0
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_0                           0x00000cc7
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_1                           0x00000cc8
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_2                           0x00000cc9
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_3                           0x00000cca
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_4                           0x00000ccb
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_5                           0x00000ccc
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_6                           0x00000ccd
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_7                           0x00000cce
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_3                          0x00000cd2
+
+#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION                     0x00000ce0
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK             0x00003fff
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT            0
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+       return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK            0x3fff0000
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT           16
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+       return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW0                            0x000020cc
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW1                            0x000020cd
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW2                            0x000020ce
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW3                            0x000020cf
+
+#define REG_A4XX_RB_MODE_CONTROL                               0x000020a0
+#define A4XX_RB_MODE_CONTROL_WIDTH__MASK                       0x0000003f
+#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT                      0
+static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
+{
+       return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK                      0x00003f00
+#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT                     8
+static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
+{
+       return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL                             0x000020a1
+#define A4XX_RB_RENDER_CONTROL_BINNING_PASS                    0x00000001
+#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE              0x00000020
+
+#define REG_A4XX_RB_MSAA_CONTROL                               0x000020a2
+#define A4XX_RB_MSAA_CONTROL_DISABLE                           0x00001000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK                     0x0000e000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT                    13
+static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
+{
+       return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+
+#define REG_A4XX_RB_MSAA_CONTROL2                              0x000020a3
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK               0x00000380
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT              7
+static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
+{
+       return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
+}
+#define A4XX_RB_MSAA_CONTROL2_VARYING                          0x00001000
+
+static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE                   0x00000008
+#define A4XX_RB_MRT_CONTROL_BLEND                              0x00000010
+#define A4XX_RB_MRT_CONTROL_BLEND2                             0x00000020
+#define A4XX_RB_MRT_CONTROL_FASTCLEAR                          0x00000400
+#define A4XX_RB_MRT_CONTROL_B11                                        0x00000800
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK             0x0f000000
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT            24
+static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+       return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
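+/*
+ * Indexed-register sketch (illustrative): the REG_A4XX_RB_MRT_*(i0)
+ * helpers model one 5-dword register block per render target, so
+ * MRT n starts at 0x20a4 + 5*n.  A per-MRT setup loop would look
+ * roughly like (gpu_write() as the usual register accessor):
+ *
+ *	for (i = 0; i < nr_mrts; i++)
+ *		gpu_write(gpu, REG_A4XX_RB_MRT_CONTROL(i),
+ *			  A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf) |
+ *			  A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE);
+ */
+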
+static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK                        0x0000003f
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT               0
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
+{
+       return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK                 0x00000600
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT                        9
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+       return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK                  0x00001800
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT                 11
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+       return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK             0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT            14
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+       return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK                      0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT                     3
+static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
+{
+       return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK         0x0000001f
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT                0
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK       0x000000e0
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT      5
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK                0x00001f00
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT       8
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK       0x001f0000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT      16
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK     0x00e00000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT    21
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK      0x1f000000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT     24
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+#define REG_A4XX_RB_ALPHA_CONTROL                              0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST                       0x00000100
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK            0x00000e00
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT           9
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT                                  0x000020f9
+#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE                    0x00000001
+#define A4XX_RB_FS_OUTPUT_FAST_CLEAR                           0x00000100
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK                    0xffff0000
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT                   16
+static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL3                            0x000020fb
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK         0x0000001f
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT                0
+static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val)
+{
+       return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_CONTROL                               0x000020fc
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK                        0x00000003
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT               0
+static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+       return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_MODE__MASK                                0x00000070
+#define A4XX_RB_COPY_CONTROL_MODE__SHIFT                       4
+static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+       return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK                   0x00000f00
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT                  8
+static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+       return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK                   0xffffc000
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT                  14
+static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+       return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_BASE                             0x000020fd
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK                      0xfffffff0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT                     4
+static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+       return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_PITCH                            0x000020fe
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK                    0xffffffff
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT                   0
+static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
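+/*
+ * Unit-conversion note (illustrative): packing helpers that pre-shift
+ * their argument, e.g. A4XX_RB_COPY_DEST_BASE_BASE() (val >> 4) or
+ * A4XX_RB_COPY_DEST_PITCH_PITCH() (val >> 5), take the value in its
+ * natural unit and store only the coarser hardware granule, so the
+ * caller is expected to pass an already-aligned value:
+ *
+ *	gpu_write(gpu, REG_A4XX_RB_COPY_DEST_BASE,
+ *		  A4XX_RB_COPY_DEST_BASE_BASE(dst_iova));
+ *	gpu_write(gpu, REG_A4XX_RB_COPY_DEST_PITCH,
+ *		  A4XX_RB_COPY_DEST_PITCH_PITCH(pitch_bytes));
+ *
+ * dst_iova and pitch_bytes are placeholder names.
+ */
+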
+#define REG_A4XX_RB_COPY_DEST_INFO                             0x000020ff
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK                    0x000000fc
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT                   2
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK                      0x00000300
+#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT                     8
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK               0x00000c00
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT              10
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK          0x0003c000
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT         14
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK                    0x001c0000
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT                   18
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_TILE__MASK                      0x03000000
+#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT                     24
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
+{
+       return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT_REG                              0x00002100
+#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE                        0x00000001
+#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z                    0x00000020
+
+#define REG_A4XX_RB_DEPTH_CONTROL                              0x00002101
+#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z                    0x00000001
+#define A4XX_RB_DEPTH_CONTROL_Z_ENABLE                         0x00000002
+#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE                   0x00000004
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK                      0x00000070
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT                     4
+static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+       return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE                                0x00000080
+#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE                  0x00010000
+#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE                    0x80000000
+
+#define REG_A4XX_RB_DEPTH_CLEAR                                        0x00002102
+
+#define REG_A4XX_RB_DEPTH_INFO                                 0x00002103
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK                  0x00000003
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT                 0
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
+{
+       return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK                    0xfffff000
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT                   12
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+       return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH                                        0x00002104
+#define A4XX_RB_DEPTH_PITCH__MASK                              0xffffffff
+#define A4XX_RB_DEPTH_PITCH__SHIFT                             0
+static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
+{
+       return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH2                               0x00002105
+#define A4XX_RB_DEPTH_PITCH2__MASK                             0xffffffff
+#define A4XX_RB_DEPTH_PITCH2__SHIFT                            0
+static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
+{
+       return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL                            0x00002106
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE                 0x00000001
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF              0x00000002
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ                   0x00000004
+#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK                     0x00000700
+#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT                    8
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK                     0x00003800
+#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT                    11
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK                    0x0001c000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT                   14
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK                    0x000e0000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT                   17
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK                  0x00700000
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT                 20
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK                  0x03800000
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT                 23
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK                 0x1c000000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT                        26
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK                 0xe0000000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT                        29
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL2                           0x00002107
+#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER                        0x00000001
+
+#define REG_A4XX_RB_STENCILREFMASK                             0x0000210b
+#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK                        0x000000ff
+#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT               0
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK               0x0000ff00
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT              8
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK          0x00ff0000
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT         16
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK_BF                          0x0000210c
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK             0x000000ff
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT            0
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK            0x0000ff00
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT           8
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK       0x00ff0000
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT      16
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_BIN_OFFSET                                 0x0000210d
+#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE               0x80000000
+#define A4XX_RB_BIN_OFFSET_X__MASK                             0x00007fff
+#define A4XX_RB_BIN_OFFSET_X__SHIFT                            0
+static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
+{
+       return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
+}
+#define A4XX_RB_BIN_OFFSET_Y__MASK                             0x7fff0000
+#define A4XX_RB_BIN_OFFSET_Y__SHIFT                            16
+static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
+{
+       return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
+}
+
+#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15                       0x0000213f
+
+#define REG_A4XX_RBBM_HW_VERSION                               0x00000000
+
+#define REG_A4XX_RBBM_HW_CONFIGURATION                         0x00000002
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_CTL_UCHE                           0x00000014
+
+#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE                          0x00000015
+
+#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE                          0x00000016
+
+#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE                          0x00000017
+
+#define REG_A4XX_RBBM_CLOCK_HYST_UCHE                          0x00000018
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE                         0x00000019
+
+#define REG_A4XX_RBBM_CLOCK_MODE_GPC                           0x0000001a
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_GPC                          0x0000001b
+
+#define REG_A4XX_RBBM_CLOCK_HYST_GPC                           0x0000001c
+
+#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM                   0x0000001d
+
+#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM                  0x0000001e
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM                 0x0000001f
+
+#define REG_A4XX_RBBM_CLOCK_CTL                                        0x00000020
+
+#define REG_A4XX_RBBM_SP_HYST_CNT                              0x00000021
+
+#define REG_A4XX_RBBM_SW_RESET_CMD                             0x00000022
+
+#define REG_A4XX_RBBM_AHB_CTL0                                 0x00000023
+
+#define REG_A4XX_RBBM_AHB_CTL1                                 0x00000024
+
+#define REG_A4XX_RBBM_AHB_CMD                                  0x00000025
+
+#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL                     0x00000026
+
+#define REG_A4XX_RBBM_RAM_ACC_63_32                            0x00000028
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL                     0x0000002b
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL                   0x0000002f
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4                 0x00000034
+
+#define REG_A4XX_RBBM_INT_CLEAR_CMD                            0x00000036
+
+#define REG_A4XX_RBBM_INT_0_MASK                               0x00000037
+
+#define REG_A4XX_RBBM_RBBM_CTL                                 0x0000003e
+
+#define REG_A4XX_RBBM_AHB_DEBUG_CTL                            0x0000003f
+
+#define REG_A4XX_RBBM_VBIF_DEBUG_CTL                           0x00000041
+
+#define REG_A4XX_RBBM_CLOCK_CTL2                               0x00000042
+
+#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD                       0x00000045
+
+#define REG_A4XX_RBBM_RESET_CYCLES                             0x00000047
+
+#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL                                0x00000049
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A                         0x0000004a
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B                         0x0000004b
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C                         0x0000004c
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D                         0x0000004d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_LO                          0x0000009c
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM                      0x00000080
+
+#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM                       0x00000081
+
+#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ                           0x0000008a
+
+#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ                          0x0000008b
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ                         0x0000008c
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM                     0x0000008d
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO                         0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_CTL                              0x00000170
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0                                0x00000171
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1                                0x00000172
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2                                0x00000173
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO                    0x00000174
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI                    0x00000175
+
+#define REG_A4XX_RBBM_GPU_BUSY_MASKED                          0x0000017a
+
+#define REG_A4XX_RBBM_INT_0_STATUS                             0x0000017d
+
+#define REG_A4XX_RBBM_CLOCK_STATUS                             0x00000182
+
+#define REG_A4XX_RBBM_AHB_STATUS                               0x00000189
+
+#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS                      0x0000018c
+
+#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS                     0x0000018d
+
+#define REG_A4XX_RBBM_AHB_ERROR_STATUS                         0x0000018f
+
+#define REG_A4XX_RBBM_STATUS                                   0x00000191
+#define A4XX_RBBM_STATUS_HI_BUSY                               0x00000001
+#define A4XX_RBBM_STATUS_CP_ME_BUSY                            0x00000002
+#define A4XX_RBBM_STATUS_CP_PFP_BUSY                           0x00000004
+#define A4XX_RBBM_STATUS_CP_NRT_BUSY                           0x00004000
+#define A4XX_RBBM_STATUS_VBIF_BUSY                             0x00008000
+#define A4XX_RBBM_STATUS_TSE_BUSY                              0x00010000
+#define A4XX_RBBM_STATUS_RAS_BUSY                              0x00020000
+#define A4XX_RBBM_STATUS_RB_BUSY                               0x00040000
+#define A4XX_RBBM_STATUS_PC_DCALL_BUSY                         0x00080000
+#define A4XX_RBBM_STATUS_PC_VSD_BUSY                           0x00100000
+#define A4XX_RBBM_STATUS_VFD_BUSY                              0x00200000
+#define A4XX_RBBM_STATUS_VPC_BUSY                              0x00400000
+#define A4XX_RBBM_STATUS_UCHE_BUSY                             0x00800000
+#define A4XX_RBBM_STATUS_SP_BUSY                               0x01000000
+#define A4XX_RBBM_STATUS_TPL1_BUSY                             0x02000000
+#define A4XX_RBBM_STATUS_MARB_BUSY                             0x04000000
+#define A4XX_RBBM_STATUS_VSC_BUSY                              0x08000000
+#define A4XX_RBBM_STATUS_ARB_BUSY                              0x10000000
+#define A4XX_RBBM_STATUS_HLSQ_BUSY                             0x20000000
+#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC                         0x40000000
+#define A4XX_RBBM_STATUS_GPU_BUSY                              0x80000000
+
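+/*
+ * Idle-check sketch (illustrative): the RBBM_STATUS bits above report
+ * per-block busy state, with GPU_BUSY (bit 31) as the top-level
+ * summary.  A minimal busy-wait (gpu_read() as the usual register
+ * accessor):
+ *
+ *	while (gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+ *	       A4XX_RBBM_STATUS_GPU_BUSY)
+ *		cpu_relax();
+ */
+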
+#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5                   0x0000019f
+
+#define REG_A4XX_CP_SCRATCH_UMASK                              0x00000228
+
+#define REG_A4XX_CP_SCRATCH_ADDR                               0x00000229
+
+#define REG_A4XX_CP_RB_BASE                                    0x00000200
+
+#define REG_A4XX_CP_RB_CNTL                                    0x00000201
+
+#define REG_A4XX_CP_RB_WPTR                                    0x00000205
+
+#define REG_A4XX_CP_RB_RPTR_ADDR                               0x00000203
+
+#define REG_A4XX_CP_RB_RPTR                                    0x00000204
+
+#define REG_A4XX_CP_IB1_BASE                                   0x00000206
+
+#define REG_A4XX_CP_IB1_BUFSZ                                  0x00000207
+
+#define REG_A4XX_CP_IB2_BASE                                   0x00000208
+
+#define REG_A4XX_CP_IB2_BUFSZ                                  0x00000209
+
+#define REG_A4XX_CP_ME_RB_DONE_DATA                            0x00000217
+
+#define REG_A4XX_CP_QUEUE_THRESH2                              0x00000219
+
+#define REG_A4XX_CP_MERCIU_SIZE                                        0x0000021b
+
+#define REG_A4XX_CP_ROQ_ADDR                                   0x0000021c
+
+#define REG_A4XX_CP_ROQ_DATA                                   0x0000021d
+
+#define REG_A4XX_CP_MEQ_ADDR                                   0x0000021e
+
+#define REG_A4XX_CP_MEQ_DATA                                   0x0000021f
+
+#define REG_A4XX_CP_MERCIU_ADDR                                        0x00000220
+
+#define REG_A4XX_CP_MERCIU_DATA                                        0x00000221
+
+#define REG_A4XX_CP_MERCIU_DATA2                               0x00000222
+
+#define REG_A4XX_CP_PFP_UCODE_ADDR                             0x00000223
+
+#define REG_A4XX_CP_PFP_UCODE_DATA                             0x00000224
+
+#define REG_A4XX_CP_ME_RAM_WADDR                               0x00000225
+
+#define REG_A4XX_CP_ME_RAM_RADDR                               0x00000226
+
+#define REG_A4XX_CP_ME_RAM_DATA                                        0x00000227
+
+#define REG_A4XX_CP_PREEMPT                                    0x0000022a
+
+#define REG_A4XX_CP_CNTL                                       0x0000022c
+
+#define REG_A4XX_CP_ME_CNTL                                    0x0000022d
+
+#define REG_A4XX_CP_DEBUG                                      0x0000022e
+
+#define REG_A4XX_CP_DEBUG_ECO_CONTROL                          0x00000231
+
+#define REG_A4XX_CP_DRAW_STATE_ADDR                            0x00000232
+
+#define REG_A4XX_CP_PROTECT_REG_0                              0x00000240
+
+static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+#define REG_A4XX_CP_PROTECT_CTRL                               0x00000250
+
+#define REG_A4XX_CP_ST_BASE                                    0x000004c0
+
+#define REG_A4XX_CP_STQ_AVAIL                                  0x000004ce
+
+#define REG_A4XX_CP_MERCIU_STAT                                        0x000004d0
+
+#define REG_A4XX_CP_WFI_PEND_CTR                               0x000004d2
+
+#define REG_A4XX_CP_HW_FAULT                                   0x000004d8
+
+#define REG_A4XX_CP_PROTECT_STATUS                             0x000004da
+
+#define REG_A4XX_CP_EVENTS_IN_FLIGHT                           0x000004dd
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_0                           0x00000500
+
+#define REG_A4XX_CP_PERFCOMBINER_SELECT                                0x0000050b
+
+static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+#define REG_A4XX_SP_VS_STATUS                                  0x00000ec0
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_11                          0x00000ecf
+
+#define REG_A4XX_SP_SP_CTRL_REG                                        0x000022c0
+#define A4XX_SP_SP_CTRL_REG_BINNING_PASS                       0x00080000
+
+#define REG_A4XX_SP_INSTR_CACHE_CTRL                           0x000022c1
+
+#define REG_A4XX_SP_VS_CTRL_REG0                               0x000022c4
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK                  0x00000001
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT                 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_VARYING                           0x00000002
+#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID                      0x00000004
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x000003f0
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           4
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK             0x000c0000
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT            18
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT                 20
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE                   0x00200000
+#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE                      0x00400000
+
+#define REG_A4XX_SP_VS_CTRL_REG1                               0x000022c5
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK                 0x000000ff
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT                        0
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK          0x7f000000
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT         24
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A4XX_SP_VS_PARAM_REG                               0x000022c6
+#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK                    0x000000ff
+#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT                   0
+static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK                  0x0000ff00
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT                 8
+static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK               0xfff00000
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT              20
+static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A4XX_SP_VS_OUT_REG_A_REGID__MASK                       0x000001ff
+#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT                      0
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK                    0x00001e00
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT                   9
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK                       0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT                      16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK                    0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT                   25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
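+/*
+ * Packing sketch (illustrative): SP_VS_OUT_REG carries two
+ * vertex-shader outputs per dword (the A and B halves above), so
+ * consecutive outputs pair up:
+ *
+ *	reg = A4XX_SP_VS_OUT_REG_A_REGID(out[2*i].regid) |
+ *	      A4XX_SP_VS_OUT_REG_A_COMPMASK(out[2*i].compmask) |
+ *	      A4XX_SP_VS_OUT_REG_B_REGID(out[2*i+1].regid) |
+ *	      A4XX_SP_VS_OUT_REG_B_COMPMASK(out[2*i+1].compmask);
+ *	gpu_write(gpu, REG_A4XX_SP_VS_OUT_REG(i), reg);
+ *
+ * out[] is a placeholder for the shader-linkage table.
+ */
+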
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK                   0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT                  0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK                   0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT                  8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK                   0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT                  16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK                   0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT                  24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG                          0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK      0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT     16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK                0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT       25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START                               0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM                           0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR                            0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG                              0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0                               0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK                  0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT                 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING                           0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID                      0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK             0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT            18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT                 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE                   0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE                      0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1                               0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK                 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT                        0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_VARYING                           0x00100000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG                          0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK      0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT     16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK                0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT       25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START                               0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM                           0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR                            0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG                              0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG                              0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE                     0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK                        0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT               8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK                         0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT                                0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+       return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION                      0x00000100
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK                     0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT                    12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+       return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG                          0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK      0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT     16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK                0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT       25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG                          0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK      0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT     16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK                0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT       25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG                          0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK      0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT     16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK                0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT       25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_LENGTH_REG                              0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL                             0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ                            0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL                         0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3                         0x00000e68
+
+#define REG_A4XX_VPC_ATTR                                      0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK                          0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT                         0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+       return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE                                    0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK                         0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT                                12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+       return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE                                   0x02000000
+
+#define REG_A4XX_VPC_PACK                                      0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK                       0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT                      0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+       return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK                     0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT                    8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+       return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK                     0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT                    16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+       return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3                          0x0000216e
+
+#define REG_A4XX_VSC_BIN_SIZE                                  0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK                          0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT                         0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+       return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK                         0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT                                5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS                              0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2                             0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL                         0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK                       0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT                      0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+       return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK                       0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT                      10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+       return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK                       0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT                      20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+       return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK                       0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT                      24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+       return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
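+/*
+ * Binning-pipe sketch (illustrative): each VSC pipe config packs a
+ * start position (X, Y) and size (W, H), presumably in bin units as
+ * on a3xx, so a pipe setup write looks roughly like:
+ *
+ *	gpu_write(gpu, REG_A4XX_VSC_PIPE_CONFIG_REG(i),
+ *		  A4XX_VSC_PIPE_CONFIG_REG_X(x) |
+ *		  A4XX_VSC_PIPE_CONFIG_REG_Y(y) |
+ *		  A4XX_VSC_PIPE_CONFIG_REG_W(w) |
+ *		  A4XX_VSC_PIPE_CONFIG_REG_H(h));
+ */
+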
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1                       0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0                         0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1                         0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL                             0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7                         0x00000e4a
+
+#define REG_A4XX_VFD_CONTROL_0                                 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK                 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT                        0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK                 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT                        9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK               0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT              20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK             0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT            26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1                                 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK                    0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT                   0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK                     0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT                    16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK                    0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT                   24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+       return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2                                 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3                                 0x00002203
+
+#define REG_A4XX_VFD_CONTROL_4                                 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET                              0x00002208
+
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK                 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT                        0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+       return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK                 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT                        7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+       return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT                      0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK                  0xff000000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT                 24
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+       return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK                      0xfffffff0
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT                     4
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+       return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
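+/* SIZE is 16-byte granular: the shift right by 4 drops the low bits and the
+ * field sits at bit 4, so the encoded word is simply the byte size with its
+ * low four bits cleared. */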
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK                  0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT                 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+       return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL                                0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK                     0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT                    6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+       return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK                      0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT                     12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+       return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK                       0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT                      22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+       return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK                   0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT                  24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+       return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID                    0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT                       0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL                                0x00000f00
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7                         0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET                            0x00002380
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR                        0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS                               0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL                                0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0                                0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3                                0x00000c8b
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL                             0x00002000
+
+#define REG_A4XX_GRAS_CLEAR_CNTL                               0x00002003
+#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR                     0x00000001
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ                           0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK                    0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT                   0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK                    0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT                   10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0                       0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK                     0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT                    0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
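+/* fui() (the float-to-uint32 helper from msm_drv.h) reinterprets the
+ * float's IEEE-754 bit pattern, since the viewport registers take raw
+ * float encodings; e.g. fui(1.0f) == 0x3f800000. */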
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0                                0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK                      0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT                     0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0                       0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK                     0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT                    0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0                                0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK                      0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT                     0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0                       0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK                     0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT                    0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0                                0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK                      0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT                     0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_MINMAX                          0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK                    0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT                   0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+       return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK                    0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT                   16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+       return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE                            0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK                          0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT                         0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+       return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
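+/* Point sizes and min/max clamps are fixed point with four fractional bits
+ * (the val * 16.0 above), so a point size of 1.0f encodes as 0x10. */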
+
+#define REG_A4XX_GRAS_ALPHA_CONTROL                            0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE              0x00000004
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE                     0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK                   0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT                  0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET                    0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK                  0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT                 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+       return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL                      0x0000209f
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL                     0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE   0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK                 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT                        0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK                 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT                        16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR                     0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE   0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK                 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT                        0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK                 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT                        16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR                     0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE   0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK                 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT                        0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK                 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT                        16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL                     0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE   0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK                 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT                        0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK                 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT                        16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL                            0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK                   0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT                  0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+       return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL                          0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT                   0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK                    0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW                     0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK          0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT         3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+       return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET                  0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS               0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL                               0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK                 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT                        2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+       return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK                        0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT               7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE                      0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK                 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT                        12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+       return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL                       0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO                             0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI                             0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS                             0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0                              0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1                              0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD                           0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7                       0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD                                0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL                                0x00000e04
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK                           0x00000e0e
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG                            0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK             0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT            4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE            0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART                        0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2                      0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE                   0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK                        0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT               27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE              0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE              0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE                   0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT                  0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG                            0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK             0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT            6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE            0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1                      0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD                                0x02000000
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG                            0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK       0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT      26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG                            0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK                    0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT                   0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_VS_CONTROL_REG                           0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT            0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK             0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT            24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG                           0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT            0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK             0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT            24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG                           0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT            0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK             0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT            24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG                           0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT            0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK             0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT            24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG                           0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT            0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK             0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT            24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL                           0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND                            0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE                 0x00000001
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE                    0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0                           0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7                           0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE                                   0x000021c0
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL                              0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT                           0x00000001
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST               0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE                            0x04000000
+
+#define REG_A4XX_UNKNOWN_21C5                                  0x000021c5
+
+#define REG_A4XX_PC_RESTART_INDEX                              0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM                                   0x000021e5
+
+#define REG_A4XX_PC_HS_PARAM                                   0x000021e7
+
+#define REG_A4XX_VBIF_VERSION                                  0x00003000
+
+#define REG_A4XX_VBIF_CLKON                                    0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS                       0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT                                        0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF                           0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN                                0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0                          0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1                          0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0                          0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1                          0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB                      0x00003049
+
+#define REG_A4XX_UNKNOWN_0CC5                                  0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6                                  0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01                                  0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E05                                  0x00000e05
+
+#define REG_A4XX_UNKNOWN_0E42                                  0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2                                  0x00000ec2
+
+#define REG_A4XX_UNKNOWN_0EC3                                  0x00000ec3
+
+#define REG_A4XX_UNKNOWN_0F03                                  0x00000f03
+
+#define REG_A4XX_UNKNOWN_2001                                  0x00002001
+
+#define REG_A4XX_UNKNOWN_209B                                  0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF                                  0x000020ef
+
+#define REG_A4XX_UNKNOWN_20F0                                  0x000020f0
+
+#define REG_A4XX_UNKNOWN_20F1                                  0x000020f1
+
+#define REG_A4XX_UNKNOWN_20F2                                  0x000020f2
+
+#define REG_A4XX_UNKNOWN_20F3                                  0x000020f3
+
+#define REG_A4XX_UNKNOWN_20F4                                  0x000020f4
+
+#define REG_A4XX_UNKNOWN_20F5                                  0x000020f5
+
+#define REG_A4XX_UNKNOWN_20F6                                  0x000020f6
+
+#define REG_A4XX_UNKNOWN_20F7                                  0x000020f7
+
+#define REG_A4XX_UNKNOWN_2152                                  0x00002152
+
+#define REG_A4XX_UNKNOWN_2153                                  0x00002153
+
+#define REG_A4XX_UNKNOWN_2154                                  0x00002154
+
+#define REG_A4XX_UNKNOWN_2155                                  0x00002155
+
+#define REG_A4XX_UNKNOWN_2156                                  0x00002156
+
+#define REG_A4XX_UNKNOWN_2157                                  0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3                                  0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6                                  0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209                                  0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7                                  0x000022d7
+
+#define REG_A4XX_UNKNOWN_2381                                  0x00002381
+
+#define REG_A4XX_UNKNOWN_23A0                                  0x000023a0
+
+#define REG_A4XX_TEX_SAMP_0                                    0x00000000
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK                           0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT                          1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+       return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK                           0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT                          3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+       return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK                           0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT                          5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+       return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK                           0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT                          8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+       return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK                           0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT                          11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
+{
+       return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+
+#define REG_A4XX_TEX_SAMP_1                                    0x00000001
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK                     0x0000000e
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT                    1
+static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A4XX_TEX_SAMP_1_MAX_LOD__MASK                          0x000fff00
+#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT                         8
+static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+       return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A4XX_TEX_SAMP_1_MIN_LOD__MASK                          0xfff00000
+#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT                         20
+static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+       return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
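+/* The LOD clamps are unsigned fixed point with six fractional bits (the
+ * val * 64.0 above); e.g. A4XX_TEX_SAMP_1_MAX_LOD(2.5f) stores 160 (0xa0)
+ * in bits 8..19. */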
+
+#define REG_A4XX_TEX_CONST_0                                   0x00000000
+#define A4XX_TEX_CONST_0_TILED                                 0x00000001
+#define A4XX_TEX_CONST_0_SWIZ_X__MASK                          0x00000070
+#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT                         4
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
+{
+       return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Y__MASK                          0x00000380
+#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT                         7
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
+{
+       return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Z__MASK                          0x00001c00
+#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT                         10
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
+{
+       return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_W__MASK                          0x0000e000
+#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT                         13
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
+{
+       return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A4XX_TEX_CONST_0_FMT__MASK                             0x1fc00000
+#define A4XX_TEX_CONST_0_FMT__SHIFT                            22
+static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
+{
+       return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
+}
+#define A4XX_TEX_CONST_0_TYPE__MASK                            0x60000000
+#define A4XX_TEX_CONST_0_TYPE__SHIFT                           29
+static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
+{
+       return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_1                                   0x00000001
+#define A4XX_TEX_CONST_1_HEIGHT__MASK                          0x00007fff
+#define A4XX_TEX_CONST_1_HEIGHT__SHIFT                         0
+static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+       return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A4XX_TEX_CONST_1_WIDTH__MASK                           0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__SHIFT                          15
+static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+       return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_2                                   0x00000002
+#define A4XX_TEX_CONST_2_PITCH__MASK                           0x3ffffe00
+#define A4XX_TEX_CONST_2_PITCH__SHIFT                          9
+static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+       return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A4XX_TEX_CONST_2_SWAP__MASK                            0xc0000000
+#define A4XX_TEX_CONST_2_SWAP__SHIFT                           30
+static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+       return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_3                                   0x00000003
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK                         0x0000000f
+#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT                                0
+static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+       return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+}
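+/* LAYERSZ is stored in 4 KiB granules, hence the shift right by 12. */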
+
+#define REG_A4XX_TEX_CONST_4                                   0x00000004
+#define A4XX_TEX_CONST_4_BASE__MASK                            0xffffffff
+#define A4XX_TEX_CONST_4_BASE__SHIFT                           0
+static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
+{
+       return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_5                                   0x00000005
+
+#define REG_A4XX_TEX_CONST_6                                   0x00000006
+
+#define REG_A4XX_TEX_CONST_7                                   0x00000007
+
+
+#endif /* A4XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644 (file)
index 0000000..9122183
--- /dev/null
@@ -0,0 +1,604 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "a4xx_gpu.h"
+#ifdef CONFIG_MSM_OCMEM
+#  include <soc/qcom/ocmem.h>
+#endif
+
+#define A4XX_INT0_MASK \
+       (A4XX_INT0_RBBM_AHB_ERROR |        \
+        A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+        A4XX_INT0_CP_T0_PACKET_IN_IB |    \
+        A4XX_INT0_CP_OPCODE_ERROR |       \
+        A4XX_INT0_CP_RESERVED_BIT_ERROR | \
+        A4XX_INT0_CP_HW_FAULT |           \
+        A4XX_INT0_CP_IB1_INT |            \
+        A4XX_INT0_CP_IB2_INT |            \
+        A4XX_INT0_CP_RB_INT |             \
+        A4XX_INT0_CP_REG_PROTECT_FAULT |  \
+        A4XX_INT0_CP_AHB_ERROR_HALT |     \
+        A4XX_INT0_UCHE_OOB_ACCESS)
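+
+/* The sources unmasked in RBBM_INT_0_MASK: the CP ring/IB completion
+ * interrupts that drive retire processing, plus the fatal bus, opcode and
+ * register-protect error sources. */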
+
+extern bool hang_debug;
+static void a4xx_dump(struct msm_gpu *gpu);
+
+/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @gpu: the msm GPU whose clock control registers are programmed
+ */
+static void a4xx_enable_hwcg(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       unsigned int i;
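+
+       /* The CTL/HYST/DELAY values below are opaque clock-gating settings
+        * carried over from the reference driver; the TP/SP/RB/MARB blocks
+        * are instantiated four times, hence the loops over i. */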
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+       for (i = 0; i < 4; i++)
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
+
+       /* Disable L1 clocking in A420 due to CCU issues with it */
+       for (i = 0; i < 4; i++) {
+               if (adreno_is_a420(adreno_gpu)) {
+                       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+                                       0x00002020);
+               } else {
+                       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+                                       0x00022020);
+               }
+       }
+
+       for (i = 0; i < 4; i++) {
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+                               0x00000922);
+       }
+
+       for (i = 0; i < 4; i++) {
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+                               0x00000000);
+       }
+
+       for (i = 0; i < 4; i++) {
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+                               0x00000001);
+       }
+
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+       gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
+}
+
+static void a4xx_me_init(struct msm_gpu *gpu)
+{
+       struct msm_ringbuffer *ring = gpu->rb;
+
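+       /* CP_ME_INIT is a type-3 packet with 17 payload dwords that primes
+        * the micro-engine's default state; the magic values below are
+        * carried over unchanged from the reference init sequence. */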
+       OUT_PKT3(ring, CP_ME_INIT, 17);
+       OUT_RING(ring, 0x000003f7);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000080);
+       OUT_RING(ring, 0x00000100);
+       OUT_RING(ring, 0x00000180);
+       OUT_RING(ring, 0x00006600);
+       OUT_RING(ring, 0x00000150);
+       OUT_RING(ring, 0x0000014e);
+       OUT_RING(ring, 0x00000154);
+       OUT_RING(ring, 0x00000001);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+       OUT_RING(ring, 0x00000000);
+
+       gpu->funcs->flush(gpu);
+       gpu->funcs->idle(gpu);
+}
+
+static int a4xx_hw_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+       uint32_t *ptr, len;
+       int i, ret;
+
+       if (adreno_is_a4xx(adreno_gpu)) {
+               gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
+               gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
+               gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+               gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+               gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+               gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+               gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+               gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+       } else {
+               BUG();
+       }
+
+       /* Make all blocks contribute to the GPU BUSY perf counter */
+       gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+       /* Tune the hysteresis counters for SP and CP idle detection */
+       gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
+       gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+       /* Enable the RBBM error reporting bits */
+       gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+       /* Enable AHB error reporting */
+       gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+       /* Enable power counters */
+       gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
+
+       /*
+        * Turn on hang detection - this spews a lot of useful information
+        * into the RBBM registers on a hang:
+        */
+       gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+                       (1 << 30) | 0xFFFF);
+
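+       /* RB_GMEM_BASE_ADDR takes the on-chip memory base in 16 KiB units,
+        * hence the shift right by 14. */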
+       gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
+                       (unsigned int)(a4xx_gpu->ocmem_base >> 14));
+
+       /* Turn on performance counters: */
+       gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+
+       /* Disable L2 bypass to avoid UCHE out of bounds errors */
+       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
+       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+
+       gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
+                       (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+
+       a4xx_enable_hwcg(gpu);
+
+       /*
+        * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
+        * due to timing issue with HLSQ_TP_CLK_EN
+        */
+       if (adreno_is_a420(adreno_gpu)) {
+               unsigned int val;
+               val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
+               val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+               val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
+               gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
+       }
+
+       ret = adreno_hw_init(gpu);
+       if (ret)
+               return ret;
+
+       /* setup access protection: */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
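+       /* Each CP_PROTECT entry below encodes a protected register span
+        * (base offset plus a power-of-two length) in a single word, so CP
+        * accesses to that span fault instead of completing; the per-block
+        * values follow the reference driver. */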
+
+       /* RBBM registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
+
+       /* CP registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
+
+       /* RB registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
+
+       /* HLSQ registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
+
+       /* VPC registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
+
+       /* SMMU registers */
+       gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
+
+       gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
+
+       ret = adreno_hw_init(gpu);
+       if (ret)
+               return ret;
+
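+       /* The ME and PFP ucode RAMs load through address/data register
+        * pairs: the address register is written once and auto-increments
+        * on each data write. Word 0 of each firmware image is a version
+        * header, so the copy loops start at i = 1. */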
+       /* Load PM4: */
+       ptr = (uint32_t *)(adreno_gpu->pm4->data);
+       len = adreno_gpu->pm4->size / 4;
+       DBG("loading PM4 ucode version: %u", ptr[0]);
+       gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
+       for (i = 1; i < len; i++)
+               gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
+
+       /* Load PFP: */
+       ptr = (uint32_t *)(adreno_gpu->pfp->data);
+       len = adreno_gpu->pfp->size / 4;
+       DBG("loading PFP ucode version: %u", ptr[0]);
+
+       gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
+       for (i = 1; i < len; i++)
+               gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+       /* clear ME_HALT to start micro engine */
+       gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
+
+       a4xx_me_init(gpu);
+       return 0;
+}
+
+static void a4xx_recover(struct msm_gpu *gpu)
+{
+       /* dump registers before resetting gpu, if enabled: */
+       if (hang_debug)
+               a4xx_dump(gpu);
+
+       gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
+       gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
+       gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
+       adreno_recover(gpu);
+}
+
+static void a4xx_destroy(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+
+       DBG("%s", gpu->name);
+
+       adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+       if (a4xx_gpu->ocmem_base)
+               ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
+#endif
+
+       kfree(a4xx_gpu);
+}
+
+static void a4xx_idle(struct msm_gpu *gpu)
+{
+       /* wait for ringbuffer to drain: */
+       adreno_idle(gpu);
+
+       /* then wait for GPU to finish: */
+       if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+                                       A4XX_RBBM_STATUS_GPU_BUSY)))
+               DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+       /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
+{
+       uint32_t status;
+
+       status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
+       DBG("%s: Int status %08x", gpu->name, status);
+
+       gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
+
+       msm_gpu_retire(gpu);
+
+       return IRQ_HANDLED;
+}
+
+static const unsigned int a4xx_registers[] = {
+       /* RBBM */
+       0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+       0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+       0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+       /* CP */
+       0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+       0x0578, 0x058F,
+       /* VSC */
+       0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+       /* GRAS */
+       0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+       /* RB */
+       0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+       /* PC */
+       0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+       /* VFD */
+       0x0E40, 0x0E4A,
+       /* VPC */
+       0x0E60, 0x0E61, 0x0E63, 0x0E68,
+       /* UCHE */
+       0x0E80, 0x0E84, 0x0E88, 0x0E95,
+       /* VMIDMT */
+       0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
+       0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
+       0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
+       0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
+       0x1380, 0x1380,
+       /* GRAS CTX 0 */
+       0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+       /* PC CTX 0 */
+       0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+       /* VFD CTX 0 */
+       0x2200, 0x2204, 0x2208, 0x22A9,
+       /* GRAS CTX 1 */
+       0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+       /* PC CTX 1 */
+       0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+       /* VFD CTX 1 */
+       0x2600, 0x2604, 0x2608, 0x26A9,
+       /* XPU */
+       0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
+       0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
+       0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
+       /* VBIF */
+       0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
+       0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
+       0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
+       0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
+       0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+       0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+       0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+       0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+       0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
+       0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
+       0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
+       0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
+       0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
+       0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
+       0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
+       0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
+       0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
+       0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
+       0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
+       0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
+       0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
+       0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
+       0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
+       0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
+       0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
+       0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
+       0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
+       0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
+       0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
+       0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
+       0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
+       0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
+       0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
+       0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
+       ~0 /* sentinel */
+};
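+
+/* Each pair in a4xx_registers is an inclusive [start, end] range and the
+ * list ends at the ~0 sentinel. The dump code walks it roughly like this
+ * (illustrative sketch, not the actual adreno_dump() body):
+ *
+ *   for (i = 0; a4xx_registers[i] != ~0; i += 2)
+ *           for (addr = a4xx_registers[i]; addr <= a4xx_registers[i+1]; addr++)
+ *                   printk("%04x: %08x\n", addr, gpu_read(gpu, addr));
+ */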
+
+#ifdef CONFIG_DEBUG_FS
+static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+       gpu->funcs->pm_resume(gpu);
+
+       seq_printf(m, "status:   %08x\n",
+                       gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+       gpu->funcs->pm_suspend(gpu);
+
+       adreno_show(gpu, m);
+}
+#endif
+
+/* Register offset defines for A4XX, in order of enum adreno_regs */
+static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+                       REG_A4XX_CP_PFP_UCODE_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+                       REG_A4XX_CP_PFP_UCODE_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+                       REG_A4XX_CP_PROTECT_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+                       REG_A4XX_RBBM_PERFCTR_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+                       REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+                       REG_A4XX_RBBM_INT_0_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+                       REG_A4XX_RBBM_AHB_ERROR_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+                       REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+                       REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
+       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+                       REG_A4XX_VPC_DEBUG_RAM_SEL),
+       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+                       REG_A4XX_VPC_DEBUG_RAM_READ),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+                       REG_A4XX_RBBM_INT_CLEAR_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+                       REG_A4XX_VSC_SIZE_ADDRESS),
+       REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+                       REG_A4XX_SP_VS_PVT_MEM_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+                       REG_A4XX_SP_FS_PVT_MEM_ADDR),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+                       REG_A4XX_SP_VS_OBJ_START),
+       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+                       REG_A4XX_SP_FS_OBJ_START),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+                       REG_A4XX_RBBM_SW_RESET_CMD),
+       REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+                       REG_A4XX_UCHE_INVALIDATE0),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+                       REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+                       REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
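+/* Any enum adreno_regs value not listed above stays zero-initialized;
+ * adreno_reg_check() treats a zero entry as "not present on this GPU".
+ */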
+
+static void a4xx_dump(struct msm_gpu *gpu)
+{
+       printk("status:   %08x\n",
+                       gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+       adreno_dump(gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+       .base = {
+               .get_param = adreno_get_param,
+               .hw_init = a4xx_hw_init,
+               .pm_suspend = msm_gpu_pm_suspend,
+               .pm_resume = msm_gpu_pm_resume,
+               .recover = a4xx_recover,
+               .last_fence = adreno_last_fence,
+               .submit = adreno_submit,
+               .flush = adreno_flush,
+               .idle = a4xx_idle,
+               .irq = a4xx_irq,
+               .destroy = a4xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+               .show = a4xx_show,
+#endif
+       },
+};
+
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+{
+       struct a4xx_gpu *a4xx_gpu = NULL;
+       struct adreno_gpu *adreno_gpu;
+       struct msm_gpu *gpu;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       int ret;
+
+       if (!pdev) {
+               dev_err(dev->dev, "no a4xx device\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
+       if (!a4xx_gpu) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       adreno_gpu = &a4xx_gpu->base;
+       gpu = &adreno_gpu->base;
+
+       a4xx_gpu->pdev = pdev;
+
+       gpu->perfcntrs = NULL;
+       gpu->num_perfcntrs = 0;
+
+       adreno_gpu->registers = a4xx_registers;
+       adreno_gpu->reg_offsets = a4xx_register_offsets;
+
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+       if (ret)
+               goto fail;
+
+       /* if needed, allocate gmem: */
+       if (adreno_is_a4xx(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+               /* TODO this is different/missing upstream: */
+               struct ocmem_buf *ocmem_hdl =
+                               ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+               a4xx_gpu->ocmem_hdl = ocmem_hdl;
+               a4xx_gpu->ocmem_base = ocmem_hdl->addr;
+               adreno_gpu->gmem = ocmem_hdl->len;
+               DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+                               a4xx_gpu->ocmem_base);
+#endif
+       }
+
+       if (!gpu->mmu) {
+               /* TODO we think it is possible to configure the GPU to
+                * restrict access to VRAM carveout.  But the required
+                * registers are unknown.  For now just bail out and
+                * limp along with just modesetting.  If it turns out
+                * to not be possible to restrict access, then we must
+                * implement a cmdstream validator.
+                */
+               dev_err(dev->dev, "No memory protection without IOMMU\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       return gpu;
+
+fail:
+       if (a4xx_gpu)
+               a4xx_destroy(&a4xx_gpu->base.base);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644 (file)
index 0000000..0124720
--- /dev/null
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A4XX_GPU_H__
+#define __A4XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a4xx.xml.h"
+
+struct a4xx_gpu {
+       struct adreno_gpu base;
+       struct platform_device *pdev;
+
+       /* if OCMEM is used for GMEM: */
+       uint32_t ocmem_base;
+       void *ocmem_hdl;
+};
+#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+
+#endif /* __A4XX_GPU_H__ */
index cc341bc62b51f8ff860f85e11663c6d6d2f2ff81..a4b33af9338daedd4ebd7345987326c416188be4 100644 (file)
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
 enum adreno_rb_depth_format {
        DEPTHX_16 = 0,
        DEPTHX_24_8 = 1,
+       DEPTHX_32 = 2,
 };
 
 enum adreno_rb_copy_control_mode {
@@ -132,6 +133,7 @@ enum a3xx_threadmode {
 };
 
 enum a3xx_instrbuffermode {
+       CACHE = 0,
        BUFFER = 1,
 };
 
@@ -140,6 +142,13 @@ enum a3xx_threadsize {
        FOUR_QUADS = 1,
 };
 
+enum a3xx_color_swap {
+       WZYX = 0,
+       WXYZ = 1,
+       ZYXW = 2,
+       XYZW = 3,
+};
+
 #define REG_AXXX_CP_RB_BASE                                    0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL                                    0x000001c1
index 7ab85af3a7dbcc8b83299229bbbeac39ca99e05f..be83dee83d08246cdf29922caa74b6fab75584ce 100644 (file)
@@ -2,6 +2,8 @@
  * Copyright (C) 2013-2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
 module_param_named(hang_debug, hang_debug, bool, 0600);
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
        {
@@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
                .pfpfw = "a330_pfp.fw",
                .gmem  = SZ_1M,
                .init  = a3xx_gpu_init,
+       }, {
+               .rev   = ADRENO_REV(4, 2, 0, ANY_ID),
+               .revn  = 420,
+               .name  = "A420",
+               .pm4fw = "a420_pm4.fw",
+               .pfpfw = "a420_pfp.fw",
+               .gmem  = (SZ_1M + SZ_512K),
+               .init  = a4xx_gpu_init,
        },
 };
 
@@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
 MODULE_FIRMWARE("a300_pfp.fw");
 MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
+MODULE_FIRMWARE("a420_pm4.fw");
+MODULE_FIRMWARE("a420_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
index 6afa29167fee74fffaae26e8d7ded15d05ec87eb..aa873048308b8e6e3e431152963aed5b04c13f02 100644 (file)
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
        }
 
        /* Setup REG_CP_RB_CNTL: */
-       gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
                        AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
 
        /* Setup ringbuffer address: */
-       gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
-       gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+                       rbmemptr(adreno_gpu, rptr));
 
        /* Setup scratch/timestamp: */
-       gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
+                       rbmemptr(adreno_gpu, fence));
 
-       gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
 
        return 0;
 }
@@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence);
 
-       if (adreno_is_a3xx(adreno_gpu)) {
+       if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
                /* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
@@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 void adreno_flush(struct msm_gpu *gpu)
 {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t wptr = get_wptr(gpu->rb);
 
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
 
-       gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
 }
 
 void adreno_idle(struct msm_gpu *gpu)
@@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
                        gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
 
+       ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+                       adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+                       RB_SIZE);
+       if (ret)
+               return ret;
+
        ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-                       adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
-                       RB_SIZE);
-       if (ret)
-               return ret;
-
        mmu = gpu->mmu;
        if (mmu) {
                ret = mmu->funcs->attach(mmu, iommu_ports,
index 52f0515797532cef0d4496918fe421306946d0a5..a0cc30977e671d7286d52bba94cb3d09192c55d6 100644 (file)
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
+#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
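+/* The +1 bias lets a zero entry in a reg_offsets[] table mean
+ * "register not present on this GPU"; adreno_gpu_read()/_write()
+ * below subtract the 1 again before touching the hardware.
+ */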
+/**
+ * enum adreno_regs - registers used across all Adreno 3D devices.
+ * Each device type has a different offset value for the same register,
+ * so an array of register offsets is declared for every device and
+ * indexed by the enumeration values defined in this enum.
+ */
+enum adreno_regs {
+       REG_ADRENO_CP_DEBUG,
+       REG_ADRENO_CP_ME_RAM_WADDR,
+       REG_ADRENO_CP_ME_RAM_DATA,
+       REG_ADRENO_CP_PFP_UCODE_DATA,
+       REG_ADRENO_CP_PFP_UCODE_ADDR,
+       REG_ADRENO_CP_WFI_PEND_CTR,
+       REG_ADRENO_CP_RB_BASE,
+       REG_ADRENO_CP_RB_RPTR_ADDR,
+       REG_ADRENO_CP_RB_RPTR,
+       REG_ADRENO_CP_RB_WPTR,
+       REG_ADRENO_CP_PROTECT_CTRL,
+       REG_ADRENO_CP_ME_CNTL,
+       REG_ADRENO_CP_RB_CNTL,
+       REG_ADRENO_CP_IB1_BASE,
+       REG_ADRENO_CP_IB1_BUFSZ,
+       REG_ADRENO_CP_IB2_BASE,
+       REG_ADRENO_CP_IB2_BUFSZ,
+       REG_ADRENO_CP_TIMESTAMP,
+       REG_ADRENO_CP_ME_RAM_RADDR,
+       REG_ADRENO_CP_ROQ_ADDR,
+       REG_ADRENO_CP_ROQ_DATA,
+       REG_ADRENO_CP_MERCIU_ADDR,
+       REG_ADRENO_CP_MERCIU_DATA,
+       REG_ADRENO_CP_MERCIU_DATA2,
+       REG_ADRENO_CP_MEQ_ADDR,
+       REG_ADRENO_CP_MEQ_DATA,
+       REG_ADRENO_CP_HW_FAULT,
+       REG_ADRENO_CP_PROTECT_STATUS,
+       REG_ADRENO_SCRATCH_ADDR,
+       REG_ADRENO_SCRATCH_UMSK,
+       REG_ADRENO_SCRATCH_REG2,
+       REG_ADRENO_RBBM_STATUS,
+       REG_ADRENO_RBBM_PERFCTR_CTL,
+       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+       REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+       REG_ADRENO_RBBM_INT_0_MASK,
+       REG_ADRENO_RBBM_INT_0_STATUS,
+       REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+       REG_ADRENO_RBBM_PM_OVERRIDE2,
+       REG_ADRENO_RBBM_AHB_CMD,
+       REG_ADRENO_RBBM_INT_CLEAR_CMD,
+       REG_ADRENO_RBBM_SW_RESET_CMD,
+       REG_ADRENO_RBBM_CLOCK_CTL,
+       REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+       REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+       REG_ADRENO_VPC_DEBUG_RAM_SEL,
+       REG_ADRENO_VPC_DEBUG_RAM_READ,
+       REG_ADRENO_VSC_SIZE_ADDRESS,
+       REG_ADRENO_VFD_CONTROL_0,
+       REG_ADRENO_VFD_INDEX_MAX,
+       REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+       REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+       REG_ADRENO_SP_VS_OBJ_START_REG,
+       REG_ADRENO_SP_FS_OBJ_START_REG,
+       REG_ADRENO_PA_SC_AA_CONFIG,
+       REG_ADRENO_SQ_GPR_MANAGEMENT,
+       REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+       REG_ADRENO_TP0_CHICKEN,
+       REG_ADRENO_RBBM_RBBM_CTL,
+       REG_ADRENO_UCHE_INVALIDATE0,
+       REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+       REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+       REG_ADRENO_REGISTER_MAX,
+};
+
 struct adreno_rev {
        uint8_t  core;
        uint8_t  major;
@@ -76,6 +153,13 @@ struct adreno_gpu {
        struct adreno_rbmemptrs *memptrs;
        struct drm_gem_object *memptrs_bo;
        uint32_t memptrs_iova;
+
+       /*
+        * Register offsets differ between some GPUs.  GPU-specific
+        * offsets are exported by the GPU-specific code (a3xx_gpu.c,
+        * a4xx_gpu.c) and stored in this common location.
+        */
+       const unsigned int *reg_offsets;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
@@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
        return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
 }
 
+static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
+{
+       return (gpu->revn >= 400) && (gpu->revn < 500);
+}
+
+static inline bool adreno_is_a420(struct adreno_gpu *gpu)
+{
+       return gpu->revn == 420;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
        OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
 }
 
+/*
+ * adreno_reg_check() - Check the validity of a register enum
+ * @gpu:               Pointer to struct adreno_gpu
+ * @offset_name:       The register enum that is checked
+ *
+ * Returns true when @offset_name is in range and mapped for this GPU;
+ * otherwise it BUG()s, so that invalid register accesses fail loudly.
+ */
+static inline bool adreno_reg_check(struct adreno_gpu *gpu,
+               enum adreno_regs offset_name)
+{
+       if (offset_name >= REG_ADRENO_REGISTER_MAX ||
+                       !gpu->reg_offsets[offset_name]) {
+               BUG();
+       }
+       return true;
+}
+
+static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
+               enum adreno_regs offset_name)
+{
+       u32 val = 0;
+
+       /* range-check before indexing, so a bad enum can't read OOB: */
+       if (adreno_reg_check(gpu, offset_name))
+               val = gpu_read(&gpu->base, gpu->reg_offsets[offset_name] - 1);
+       return val;
+}
+
+static inline void adreno_gpu_write(struct adreno_gpu *gpu,
+               enum adreno_regs offset_name, u32 data)
+{
+       if (adreno_reg_check(gpu, offset_name))
+               gpu_write(&gpu->base, gpu->reg_offsets[offset_name] - 1, data);
+}
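+/* For example, adreno_flush() in adreno_gpu.c does
+ *     adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
+ * which picks up the A3xx or A4xx CP_RB_WPTR offset from reg_offsets[].
+ */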
 
 #endif /* __ADRENO_GPU_H__ */
index 6ef43f66c30a37676b9231d8f9b202b5f50c49f3..6a75cee94d811fdb12285e131e3187e113624096 100644 (file)
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
        CP_IM_STORE = 44,
        CP_SET_DRAW_INIT_FLAGS = 75,
        CP_SET_PROTECTED_MODE = 95,
+       CP_BOOTSTRAP_UCODE = 111,
        CP_LOAD_STATE = 48,
        CP_COND_INDIRECT_BUFFER_PFE = 58,
        CP_COND_INDIRECT_BUFFER_PFD = 50,
@@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
 #define CP_DRAW_INDX_1_NOT_EOP                                 0x00001000
 #define CP_DRAW_INDX_1_SMALL_INDEX                             0x00002000
 #define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE               0x00004000
-#define CP_DRAW_INDX_1_NUM_INDICES__MASK                       0xffff0000
-#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT                      16
-static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK                     0xff000000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT                    24
+static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+       return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
 }
 
 #define REG_CP_DRAW_INDX_2                                     0x00000002
@@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
        return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
 }
 
-#define REG_CP_DRAW_INDX_2                                     0x00000002
-#define CP_DRAW_INDX_2_INDX_BASE__MASK                         0xffffffff
-#define CP_DRAW_INDX_2_INDX_BASE__SHIFT                                0
-static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_3                                     0x00000003
+#define CP_DRAW_INDX_3_INDX_BASE__MASK                         0xffffffff
+#define CP_DRAW_INDX_3_INDX_BASE__SHIFT                                0
+static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+       return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
 }
 
-#define REG_CP_DRAW_INDX_2                                     0x00000002
-#define CP_DRAW_INDX_2_INDX_SIZE__MASK                         0xffffffff
-#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT                                0
-static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_4                                     0x00000004
+#define CP_DRAW_INDX_4_INDX_SIZE__MASK                         0xffffffff
+#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT                                0
+static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+       return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
 }
 
 #define REG_CP_DRAW_INDX_2_0                                   0x00000000
@@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
 #define CP_DRAW_INDX_2_1_NOT_EOP                               0x00001000
 #define CP_DRAW_INDX_2_1_SMALL_INDEX                           0x00002000
 #define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE             0x00004000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK                     0xffff0000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT                    16
-static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK                   0xff000000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT                  24
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+       return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
 }
 
 #define REG_CP_DRAW_INDX_2_2                                   0x00000002
@@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
 #define CP_DRAW_INDX_OFFSET_0_NOT_EOP                          0x00001000
 #define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX                      0x00002000
 #define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE                0x00004000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK                        0xffff0000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT               16
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK              0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT             16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+       return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
 }
 
 #define REG_CP_DRAW_INDX_OFFSET_1                              0x00000001
@@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
        return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
 }
 
-#define REG_CP_DRAW_INDX_OFFSET_2                              0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK                  0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT                 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_3                              0x00000003
+
+#define REG_CP_DRAW_INDX_OFFSET_4                              0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK                  0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT                 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+       return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
 }
 
-#define REG_CP_DRAW_INDX_OFFSET_2                              0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK                  0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT                 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_5                              0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK                  0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT                 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
 {
-       return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+       return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
 }
 
 #define REG_CP_SET_DRAW_STATE_0                                        0x00000000
index e965898dfda6cbc2b09876fe3a9f8b68582d8fc5..448438b759b4eeb41af628913c09ecca4f1e0f2a 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index f2bdda957205a092b4d4a0bd6f02cfcd0d32fdb6..c102a7f074acdd5936cc99b9c46fbddffffbaa5a 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index e5b071ffd8657761d78f8499f86eb5c4e825144b..a900134bdf3300b38ac911805b825f3e683b5923 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index 9d00dcba695950873c77baa761dea89e4b86aeac..062c687253766278e69fe46429cc927c3223cada 100644 (file)
@@ -15,6 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_irq.h>
 #include "hdmi.h"
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
                        power_on ? "Enable" : "Disable", ctrl);
 }
 
-irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
 {
        struct hdmi *hdmi = dev_id;
 
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-void hdmi_destroy(struct kref *kref)
+static void hdmi_destroy(struct hdmi *hdmi)
 {
-       struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
        struct hdmi_phy *phy = hdmi->phy;
 
        if (phy)
@@ -68,37 +68,24 @@ void hdmi_destroy(struct kref *kref)
        platform_set_drvdata(hdmi->pdev, NULL);
 }
 
-/* initialize connector */
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+/* Construct the hdmi device at bind/probe time and grab all of its
+ * resources.  If we need to -EPROBE_DEFER, we want that to happen
+ * here, rather than later at modeset_init() time.
+ */
+static struct hdmi *hdmi_init(struct platform_device *pdev)
 {
+       struct hdmi_platform_config *config = pdev->dev.platform_data;
        struct hdmi *hdmi = NULL;
-       struct msm_drm_private *priv = dev->dev_private;
-       struct platform_device *pdev = priv->hdmi_pdev;
-       struct hdmi_platform_config *config;
        int i, ret;
 
-       if (!pdev) {
-               dev_err(dev->dev, "no hdmi device\n");
-               ret = -ENXIO;
-               goto fail;
-       }
-
-       config = pdev->dev.platform_data;
-
-       hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+       hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi) {
                ret = -ENOMEM;
                goto fail;
        }
 
-       kref_init(&hdmi->refcount);
-
-       hdmi->dev = dev;
        hdmi->pdev = pdev;
        hdmi->config = config;
-       hdmi->encoder = encoder;
-
-       hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
 
        /* not sure about which phy maps to which msm.. probably I miss some */
        if (config->phy_init)
@@ -108,7 +95,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 
        if (IS_ERR(hdmi->phy)) {
                ret = PTR_ERR(hdmi->phy);
-               dev_err(dev->dev, "failed to load phy: %d\n", ret);
+               dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
                hdmi->phy = NULL;
                goto fail;
        }
@@ -127,7 +114,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                                config->hpd_reg_names[i]);
                if (IS_ERR(reg)) {
                        ret = PTR_ERR(reg);
-                       dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+                       dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
                                        config->hpd_reg_names[i], ret);
                        goto fail;
                }
@@ -143,7 +130,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                                config->pwr_reg_names[i]);
                if (IS_ERR(reg)) {
                        ret = PTR_ERR(reg);
-                       dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+                       dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
                                        config->pwr_reg_names[i], ret);
                        goto fail;
                }
@@ -158,7 +145,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
-                       dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+                       dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
                                        config->hpd_clk_names[i], ret);
                        goto fail;
                }
@@ -173,7 +160,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
-                       dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+                       dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
                                        config->pwr_clk_names[i], ret);
                        goto fail;
                }
@@ -184,11 +171,40 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
        hdmi->i2c = hdmi_i2c_init(hdmi);
        if (IS_ERR(hdmi->i2c)) {
                ret = PTR_ERR(hdmi->i2c);
-               dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+               dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
                hdmi->i2c = NULL;
                goto fail;
        }
 
+       return hdmi;
+
+fail:
+       if (hdmi)
+               hdmi_destroy(hdmi);
+
+       return ERR_PTR(ret);
+}
+
+/* Second part of initialization: the drm/kms level modeset_init,
+ * which constructs/initializes mode objects etc.  It is called from
+ * the master driver (not the hdmi sub-device's probe/bind!).
+ *
+ * Any resource (regulator/clk/etc) which could be missing at boot
+ * should be handled in hdmi_init() so that the failure happens in
+ * the hdmi sub-device's probe.
+ */
+int hdmi_modeset_init(struct hdmi *hdmi,
+               struct drm_device *dev, struct drm_encoder *encoder)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = hdmi->pdev;
+       int ret;
+
+       hdmi->dev = dev;
+       hdmi->encoder = encoder;
+
+       hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
        hdmi->bridge = hdmi_bridge_init(hdmi);
        if (IS_ERR(hdmi->bridge)) {
                ret = PTR_ERR(hdmi->bridge);
@@ -205,22 +221,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                goto fail;
        }
 
-       if (!config->shared_irq) {
-               hdmi->irq = platform_get_irq(pdev, 0);
-               if (hdmi->irq < 0) {
-                       ret = hdmi->irq;
-                       dev_err(dev->dev, "failed to get irq: %d\n", ret);
-                       goto fail;
-               }
+       hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (!hdmi->irq) {
+               /* irq_of_parse_and_map() returns 0 on failure: */
+               ret = -EINVAL;
+               dev_err(dev->dev, "failed to get irq\n");
+               goto fail;
+       }
 
-               ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
-                               NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-                               "hdmi_isr", hdmi);
-               if (ret < 0) {
-                       dev_err(dev->dev, "failed to request IRQ%u: %d\n",
-                                       hdmi->irq, ret);
-                       goto fail;
-               }
+       ret = devm_request_irq(&pdev->dev, hdmi->irq,
+                       hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                       "hdmi_isr", hdmi);
+       if (ret < 0) {
+               dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+                               hdmi->irq, ret);
+               goto fail;
        }
 
        encoder->bridge = hdmi->bridge;
@@ -230,19 +244,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 
        platform_set_drvdata(pdev, hdmi);
 
-       return hdmi;
+       return 0;
 
 fail:
-       if (hdmi) {
-               /* bridge/connector are normally destroyed by drm: */
-               if (hdmi->bridge)
-                       hdmi->bridge->funcs->destroy(hdmi->bridge);
-               if (hdmi->connector)
-                       hdmi->connector->funcs->destroy(hdmi->connector);
-               hdmi_destroy(&hdmi->refcount);
+       /* bridge/connector are normally destroyed by drm: */
+       if (hdmi->bridge) {
+               hdmi->bridge->funcs->destroy(hdmi->bridge);
+               hdmi->bridge = NULL;
+       }
+       if (hdmi->connector) {
+               hdmi->connector->funcs->destroy(hdmi->connector);
+               hdmi->connector = NULL;
        }
 
-       return ERR_PTR(ret);
+       return ret;
 }
 
 /*
@@ -251,13 +266,6 @@ fail:
 
 #include <linux/of_gpio.h>
 
-static void set_hdmi_pdev(struct drm_device *dev,
-               struct platform_device *pdev)
-{
-       struct msm_drm_private *priv = dev->dev_private;
-       priv->hdmi_pdev = pdev;
-}
-
 #ifdef CONFIG_OF
 static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
 {
@@ -278,7 +286,10 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
 
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct msm_drm_private *priv = drm->dev_private;
        static struct hdmi_platform_config config = {};
+       struct hdmi *hdmi;
 #ifdef CONFIG_OF
        struct device_node *of_node = dev->of_node;
 
@@ -298,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
                config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
                config.pwr_clk_names = pwr_clk_names;
                config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
-               config.shared_irq    = true;
        } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
                static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
                static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -369,14 +379,22 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        }
 #endif
        dev->platform_data = &config;
-       set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+       hdmi = hdmi_init(to_platform_device(dev));
+       if (IS_ERR(hdmi))
+               return PTR_ERR(hdmi);
+       priv->hdmi = hdmi;
        return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master,
                void *data)
 {
-       set_hdmi_pdev(dev_get_drvdata(master), NULL);
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct msm_drm_private *priv = drm->dev_private;
+       if (priv->hdmi) {
+               hdmi_destroy(priv->hdmi);
+               priv->hdmi = NULL;
+       }
 }
 
 static const struct component_ops hdmi_ops = {
index b981995410b50a886e21f74e38589d6af415f950..43e654f751b741a06578989b121d225139a3664a 100644 (file)
@@ -38,8 +38,6 @@ struct hdmi_audio {
 };
 
 struct hdmi {
-       struct kref refcount;
-
        struct drm_device *dev;
        struct platform_device *pdev;
 
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
        /* gpio's: */
        int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
        int mux_lpm_gpio;
-
-       /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
-       bool shared_irq;
 };
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
-void hdmi_destroy(struct kref *kref);
 
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
        return msm_readl(hdmi->mmio + reg);
 }
 
-static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
-{
-       kref_get(&hdmi->refcount);
-       return hdmi;
-}
-
-static inline void hdmi_unreference(struct hdmi *hdmi)
-{
-       kref_put(&hdmi->refcount, hdmi_destroy);
-}
-
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
index 76fd0cfc6558fc622755ca90ff4c3fbc9a4f17ea..5b0844befbab6b42b268dad38f8d864138adec01 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index f6cf745c249e60a604e8ab00f99ca75cb3af2b43..6902ad6da710fe2d4f7fb7e9f56c3f84ba1c5b49 100644 (file)
@@ -26,7 +26,6 @@ struct hdmi_bridge {
 static void hdmi_bridge_destroy(struct drm_bridge *bridge)
 {
        struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
-       hdmi_unreference(hdmi_bridge->hdmi);
        drm_bridge_cleanup(bridge);
        kfree(hdmi_bridge);
 }
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
                goto fail;
        }
 
-       hdmi_bridge->hdmi = hdmi_reference(hdmi);
+       hdmi_bridge->hdmi = hdmi;
 
        bridge = &hdmi_bridge->base;
 
index 4aca2a3c667cff443495f14508c5acdf7a7b0a48..fbebb0405d76d7df217c1772a26cc43e78578b29 100644 (file)
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
 
-       hdmi_unreference(hdmi_connector->hdmi);
-
        kfree(hdmi_connector);
 }
 
@@ -401,6 +399,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
        .detect = hdmi_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = hdmi_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -422,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
                goto fail;
        }
 
-       hdmi_connector->hdmi = hdmi_reference(hdmi);
+       hdmi_connector->hdmi = hdmi;
        INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
 
        connector = &hdmi_connector->base;
index f408b69486a8eb83d299abc636356187d2d467bb..eeed006eed1337a6c07c62e0cc62d944b95fce83 100644 (file)
@@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
 #ifdef CONFIG_COMMON_CLK
        phy_8960->pll_hw.init = &pll_init;
-       phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+       phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
        if (IS_ERR(phy_8960->pll)) {
                ret = PTR_ERR(phy_8960->pll);
                phy_8960->pll = NULL;
index d53c29327df908c81e957467803ce0d6d6ad3ece..29bd796797de6e240d4749fd2b6d8623b1a592d9 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index 03c0bd9cd5b9270dcec078aaa71ce7be2adff210..a4a7f8c7122acfcb93a9a68b52af9ab4847fa023 100644 (file)
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
index 7d00f7fb5773571c2fd005df0e278c18de425b64..a7672e100d8b5bd5f4ac4b782a3b1d0a38197a79 100644 (file)
@@ -25,8 +25,6 @@
 struct mdp4_crtc {
        struct drm_crtc base;
        char name[8];
-       struct drm_plane *plane;
-       struct drm_plane *planes[8];
        int id;
        int ovlp;
        enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
 
        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
-       struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
        atomic_t pending;
 
-       /* the fb that we logically (from PoV of KMS API) hold a ref
-        * to.  Which we may not yet be scanning out (we may still
-        * be scanning out previous in case of page_flip while waiting
-        * for gpu rendering to complete:
-        */
-       struct drm_framebuffer *fb;
-
-       /* the fb that we currently hold a scanout ref to: */
-       struct drm_framebuffer *scanout_fb;
-
-       /* for unref'ing framebuffers after scanout completes: */
-       struct drm_flip_work unref_fb_work;
-
        /* for unref'ing cursor bo's after scanout completes: */
        struct drm_flip_work unref_cursor_work;
 
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       uint32_t i, flush = 0;
+       struct drm_plane *plane;
+       uint32_t flush = 0;
 
-       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-               struct drm_plane *plane = mdp4_crtc->planes[i];
-               if (plane) {
-                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-                       flush |= pipe2flush(pipe_id);
-               }
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+               flush |= pipe2flush(pipe_id);
        }
+
        flush |= ovlp2flush(mdp4_crtc->ovlp);
 
        DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
        mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
 }
 
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_framebuffer *old_fb = mdp4_crtc->fb;
-
-       /* grab reference to incoming scanout fb: */
-       drm_framebuffer_reference(new_fb);
-       mdp4_crtc->base.primary->fb = new_fb;
-       mdp4_crtc->fb = new_fb;
-
-       if (old_fb)
-               drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-}
-
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this.  Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
- */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-       /* flush updates, to make sure hw is updated to new scanout fb,
-        * so that we can safely queue unref to current fb (ie. next
-        * vblank we know hw is done w/ previous scanout_fb).
-        */
-       crtc_flush(crtc);
-
-       if (mdp4_crtc->scanout_fb)
-               drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
-                               mdp4_crtc->scanout_fb);
-
-       mdp4_crtc->scanout_fb = fb;
-
-       /* enable vblank to complete flip: */
-       request_pending(crtc, PENDING_FLIP);
-}
-
 /* if file!=NULL, this is preclose potential cancel-flip path */
 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp4_crtc->event = NULL;
+                       DBG("%s: send event: %p", mdp4_crtc->name, event);
                        drm_send_vblank_event(dev, mdp4_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-       struct mdp4_crtc *mdp4_crtc =
-               container_of(cb, struct mdp4_crtc, pageflip_cb);
-       struct drm_crtc *crtc = &mdp4_crtc->base;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-
-       if (!fb)
-               return;
-
-       drm_framebuffer_reference(fb);
-       mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-       update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-       struct mdp4_crtc *mdp4_crtc =
-               container_of(work, struct mdp4_crtc, unref_fb_work);
-       struct drm_device *dev = mdp4_crtc->base.dev;
-
-       mutex_lock(&dev->mode_config.mutex);
-       drm_framebuffer_unreference(val);
-       mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 {
        struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 
        drm_crtc_cleanup(crtc);
-       drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
        drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
 
        kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
-static void blend_setup(struct drm_crtc *crtc)
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+               [VG1]  = 1,
+               [VG2]  = 2,
+               [RGB1] = 0,
+               [RGB2] = 0,
+               [RGB3] = 0,
+               [VG3]  = 3,
+               [VG4]  = 4,
+
+};
+
+/* setup the mixer config; this needs to consider all CRTCs and
+ * the planes attached to them
+ *
+ * TODO: may need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
 {
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       int i, ovlp = mdp4_crtc->ovlp;
+       struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+       struct drm_crtc *crtc;
        uint32_t mixer_cfg = 0;
        static const enum mdp_mixer_stage_id stages[] = {
                        STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
        };
-       /* statically (for now) map planes to mixer stage (z-order): */
-       static const int idxs[] = {
-                       [VG1]  = 1,
-                       [VG2]  = 2,
-                       [RGB1] = 0,
-                       [RGB2] = 0,
-                       [RGB3] = 0,
-                       [VG3]  = 3,
-                       [VG4]  = 4,
 
-       };
-       bool alpha[4] = { false, false, false, false };
+       list_for_each_entry(crtc, &config->crtc_list, head) {
+               struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+               struct drm_plane *plane;
 
-       /* Don't rely on value read back from hw, but instead use our
-        * own shadowed value.  Possibly disable/re-enable loses the
-        * previous value and goes back to power-on default?
-        */
-       mixer_cfg = mdp4_kms->mixer_cfg;
+               drm_atomic_crtc_for_each_plane(plane, crtc) {
+                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+                       int idx = idxs[pipe_id];
+                       mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+                                       pipe_id, stages[idx]);
+               }
+       }
+
+       mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       struct drm_plane *plane;
+       int i, ovlp = mdp4_crtc->ovlp;
+       bool alpha[4] = { false, false, false, false };
 
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
 
-       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-               struct drm_plane *plane = mdp4_crtc->planes[i];
-               if (plane) {
-                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-                       int idx = idxs[pipe_id];
-                       if (idx > 0) {
-                               const struct mdp_format *format =
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+               int idx = idxs[pipe_id];
+               if (idx > 0) {
+                       const struct mdp_format *format =
                                        to_mdp_format(msm_framebuffer_format(plane->fb));
-                               alpha[idx-1] = format->alpha_enable;
-                       }
-                       mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
-                                       pipe_id, stages[idx]);
+                       alpha[idx-1] = format->alpha_enable;
                }
        }
 
-       /* this shouldn't happen.. and seems to cause underflow: */
-       WARN_ON(!mixer_cfg);
-
        for (i = 0; i < 4; i++) {
                uint32_t op;
 
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
                mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
        }
 
-       mdp4_kms->mixer_cfg = mixer_cfg;
-       mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+       setup_mixer(mdp4_kms);
 }
 
-static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode,
-               int x, int y,
-               struct drm_framebuffer *old_fb)
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct mdp4_kms *mdp4_kms = get_kms(crtc);
        enum mdp4_dma dma = mdp4_crtc->dma;
-       int ret, ovlp = mdp4_crtc->ovlp;
+       int ovlp = mdp4_crtc->ovlp;
+       struct drm_display_mode *mode;
+
+       if (WARN_ON(!crtc->state))
+               return;
 
-       mode = adjusted_mode;
+       mode = &crtc->state->adjusted_mode;
 
        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);
 
-       /* grab extra ref for update_scanout() */
-       drm_framebuffer_reference(crtc->primary->fb);
-
-       ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-       if (ret) {
-               drm_framebuffer_unreference(crtc->primary->fb);
-               dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-                               mdp4_crtc->name, ret);
-               return ret;
-       }
-
        mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
                        MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
                        MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
 
        /* take data from pipe: */
        mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
-                       crtc->primary->fb->pitches[0]);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
                        MDP4_DMA_DST_SIZE_WIDTH(0) |
                        MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
                        MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
                        MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
-                       crtc->primary->fb->pitches[0]);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
 
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
                mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
                mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
        }
-
-       update_fb(crtc, crtc->primary->fb);
-       update_scanout(crtc, crtc->primary->fb);
-
-       return 0;
 }
 
 static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
        drm_crtc_vblank_put(crtc);
 }
 
-static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-               struct drm_framebuffer *old_fb)
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_plane *plane = mdp4_crtc->plane;
-       struct drm_display_mode *mode = &crtc->mode;
-       int ret;
+       struct drm_device *dev = crtc->dev;
 
-       /* grab extra ref for update_scanout() */
-       drm_framebuffer_reference(crtc->primary->fb);
+       DBG("%s: check", mdp4_crtc->name);
 
-       ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-       if (ret) {
-               drm_framebuffer_unreference(crtc->primary->fb);
-               return ret;
+       if (mdp4_crtc->event) {
+               dev_err(dev->dev, "already pending flip!\n");
+               return -EBUSY;
        }
 
-       update_fb(crtc, crtc->primary->fb);
-       update_scanout(crtc, crtc->primary->fb);
+       /* TODO: anything else to check? */
 
        return 0;
 }
 
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       DBG("%s: begin", mdp4_crtc->name);
 }
 
-static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
-               struct drm_framebuffer *new_fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *obj;
        unsigned long flags;
 
-       if (mdp4_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
+       DBG("%s: flush", mdp4_crtc->name);
 
-       obj = msm_framebuffer_bo(new_fb, 0);
+       WARN_ON(mdp4_crtc->event);
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       mdp4_crtc->event = event;
+       mdp4_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       update_fb(crtc, new_fb);
-
-       return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+       blend_setup(crtc);
+       crtc_flush(crtc);
+       request_pending(crtc, PENDING_FLIP);
 }
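
Condensed flow of the new flip path, using only calls visible in this
diff (the event now rides the CRTC state instead of a GEM fence
callback):

	/* atomic_flush: stash the event and kick the hardware */
	mdp4_crtc->event = crtc->state->event;
	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);	/* arms the vblank irq */

	/* later, from mdp4_crtc_vblank_irq(): */
	complete_flip(crtc, NULL);		/* sends the stashed event */
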
 
 static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 }
 
 static const struct drm_crtc_funcs mdp4_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
+       .set_config = drm_atomic_helper_set_config,
        .destroy = mdp4_crtc_destroy,
-       .page_flip = mdp4_crtc_page_flip,
+       .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp4_crtc_set_property,
        .cursor_set = mdp4_crtc_cursor_set,
        .cursor_move = mdp4_crtc_cursor_move,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
        .dpms = mdp4_crtc_dpms,
        .mode_fixup = mdp4_crtc_mode_fixup,
-       .mode_set = mdp4_crtc_mode_set,
+       .mode_set_nofb = mdp4_crtc_mode_set_nofb,
+       .mode_set = drm_helper_crtc_mode_set,
+       .mode_set_base = drm_helper_crtc_mode_set_base,
        .prepare = mdp4_crtc_prepare,
        .commit = mdp4_crtc_commit,
-       .mode_set_base = mdp4_crtc_mode_set_base,
        .load_lut = mdp4_crtc_load_lut,
+       .atomic_check = mdp4_crtc_atomic_check,
+       .atomic_begin = mdp4_crtc_atomic_begin,
+       .atomic_flush = mdp4_crtc_atomic_flush,
 };
 
 static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
-               drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
        }
 
        if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
 
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
-       DBG("cancel: %p", file);
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       DBG("%s: cancel: %p", mdp4_crtc->name, file);
        complete_flip(crtc, file);
 }
 
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
        mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
 }
 
-static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
-               struct drm_plane *plane)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-       BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
-
-       if (mdp4_crtc->planes[pipe_id] == plane)
-               return;
-
-       mdp4_crtc->planes[pipe_id] = plane;
-       blend_setup(crtc);
-       if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
-               crtc_flush(crtc);
-}
-
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       set_attach(crtc, mdp4_plane_pipe(plane), plane);
-}
-
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       /* don't actually detach our primary plane: */
-       if (to_mdp4_crtc(crtc)->plane == plane)
-               return;
-       set_attach(crtc, mdp4_plane_pipe(plane), NULL);
-}
-
 static const char *dma_names[] = {
                "DMA_P", "DMA_S", "DMA_E",
 };
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 {
        struct drm_crtc *crtc = NULL;
        struct mdp4_crtc *mdp4_crtc;
-       int ret;
 
        mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
-       if (!mdp4_crtc) {
-               ret = -ENOMEM;
-               goto fail;
-       }
+       if (!mdp4_crtc)
+               return ERR_PTR(-ENOMEM);
 
        crtc = &mdp4_crtc->base;
 
-       mdp4_crtc->plane = plane;
        mdp4_crtc->id = id;
 
        mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 
        spin_lock_init(&mdp4_crtc->cursor.lock);
 
-       ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
-                       "unref fb", unref_fb_worker);
-       if (ret)
-               goto fail;
-
-       ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+       drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
                        "unref cursor", unref_cursor_worker);
 
-       INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
        drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+       plane->crtc = crtc;
 
-       mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+       mdp4_plane_install_properties(plane, &crtc->base);
 
        return crtc;
-
-fail:
-       if (crtc)
-               mdp4_crtc_destroy(crtc);
-
-       return ERR_PTR(ret);
 }
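
The drm_flip_work pattern the cursor path keeps using, as a minimal
sketch (assumes the void-returning drm_flip_work_init() seen above;
`wq` stands in for the driver's workqueue, `old_bo` for a stale
cursor buffer):

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	/* defer the unref of a buffer hw may still be scanning out: */
	drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);

	/* from the vblank irq, once hw has moved on: */
	drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, wq);

	/* at teardown: */
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
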
index 79d804e61cc44e48710f7a4703f38008e7a37b6c..a62109e4ae0d3603822fdf91fe99523f58c7b6e4 100644 (file)
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        struct drm_panel *panel;
-       struct hdmi *hdmi;
        int ret;
 
        /* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
        priv->crtcs[priv->num_crtcs++] = crtc;
        priv->encoders[priv->num_encoders++] = encoder;
 
-       hdmi = hdmi_init(dev, encoder);
-       if (IS_ERR(hdmi)) {
-               ret = PTR_ERR(hdmi);
-               dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
-               goto fail;
+       if (priv->hdmi) {
+               /* Construct bridge/connector for HDMI: */
+               ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+               if (ret) {
+                       dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+                       goto fail;
+               }
        }
 
        return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        if (IS_ERR(mdp4_kms->dsi_pll_vddio))
                mdp4_kms->dsi_pll_vddio = NULL;
 
+       /* NOTE: driver for this regulator still missing upstream.. use
+        * _get_exclusive() and ignore the error if it does not exist
+        * (and hope that the bootloader left it on for us)
+        */
        mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
        if (IS_ERR(mdp4_kms->vdd))
                mdp4_kms->vdd = NULL;
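
A minimal sketch of the optional-supply idiom described in the NOTE
above (regulator_enable() is the standard consumer call; the enable
site and error handling are illustrative):

	vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(vdd))
		vdd = NULL;	/* no driver bound; hope the bootloader left it on */

	if (vdd) {
		ret = regulator_enable(vdd);
		if (ret)
			dev_err(&pdev->dev, "failed to enable vdd: %d\n", ret);
	}
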
index 9ff6e7ccfe90d248fc70a2ccaabba6d92d7aab0a..cbd77bc626d5e8eecce7ec0b8e95797f867a5bf0 100644 (file)
@@ -32,13 +32,6 @@ struct mdp4_kms {
 
        int rev;
 
-       /* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
-        * crtcs/encoders is in one shared register, we need to update it
-        * via read/modify/write.  But to avoid getting confused by power-
-        * on-default values after resume, use this shadow value instead:
-        */
-       uint32_t mixer_cfg;
-
        /* mapper-id used to request GEM buffer mapped for scanout: */
        int id;
 
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
 
 void mdp4_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
-               struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
                enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id, int ovlp_id,
                enum mdp4_dma dma_id);
index 310034688c15a60a2a2d2d1364a1fb83bce8375b..4ddc28e1275b381fbc877dc2c706cb4215252b52 100644 (file)
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
        .detect = mdp4_lvds_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = mdp4_lvds_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
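
reset/duplicate/destroy are the minimum atomic-state plumbing for an
object that carries no driver-private state. As a rough sketch (not
the exact helper source), the duplicate hook boils down to:

	struct drm_connector_state *
	drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
	{
		return kmemdup(connector->state, sizeof(*connector->state),
				GFP_KERNEL);
	}
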
 
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
index 66f33dba1ebb3e520f3a2bfa1e4473eaeaacd7ba..1e5ebe83647d1aa3f86dd2c60024304932c9e11f 100644 (file)
@@ -31,47 +31,26 @@ struct mdp4_plane {
 };
 #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
 
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
-       struct msm_drm_private *priv = plane->dev->dev_private;
-       return to_mdp4_kms(to_mdp_kms(priv->kms));
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+               struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
                struct drm_crtc *crtc, struct drm_framebuffer *fb,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
                uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
-       mdp4_plane->enabled = true;
-
-       if (plane->fb)
-               drm_framebuffer_unreference(plane->fb);
-
-       drm_framebuffer_reference(fb);
-
-       return mdp4_plane_mode_set(plane, crtc, fb,
-                       crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x, src_y, src_w, src_h);
-}
+               uint32_t src_w, uint32_t src_h);
 
-static int mdp4_plane_disable(struct drm_plane *plane)
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-       DBG("%s: disable", mdp4_plane->name);
-       if (plane->crtc)
-               mdp4_crtc_detach(plane->crtc, plane);
-       return 0;
+       struct msm_drm_private *priv = plane->dev->dev_private;
+       return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static void mdp4_plane_destroy(struct drm_plane *plane)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 
-       mdp4_plane_disable(plane);
+       drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
 
        kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs mdp4_plane_funcs = {
-               .update_plane = mdp4_plane_update,
-               .disable_plane = mdp4_plane_disable,
+               .update_plane = drm_atomic_helper_update_plane,
+               .disable_plane = drm_atomic_helper_disable_plane,
                .destroy = mdp4_plane_destroy,
                .set_property = mdp4_plane_set_property,
+               .reset = drm_atomic_helper_plane_reset,
+               .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+               .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
-void mdp4_plane_set_scanout(struct drm_plane *plane,
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
+{
+       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+       struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+       DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
+       return msm_framebuffer_prepare(fb, mdp4_kms->id);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
+{
+       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+       struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+       DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+       msm_framebuffer_cleanup(fb, mdp4_kms->id);
+}
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = plane->state;
+       int ret;
+
+       ret = mdp4_plane_mode_set(plane,
+                       state->crtc, state->fb,
+                       state->crtc_x, state->crtc_y,
+                       state->crtc_w, state->crtc_h,
+                       state->src_x,  state->src_y,
+                       state->src_w, state->src_h);
+       /* atomic_check should have ensured that this doesn't fail */
+       WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+               .prepare_fb = mdp4_plane_prepare_fb,
+               .cleanup_fb = mdp4_plane_cleanup_fb,
+               .atomic_check = mdp4_plane_atomic_check,
+               .atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
                struct drm_framebuffer *fb)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
        struct mdp4_kms *mdp4_kms = get_kms(plane);
        enum mdp4_pipe pipe = mdp4_plane->pipe;
-       uint32_t iova;
+       uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
+
+       DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
+                       iova, fb->pitches[0]);
 
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
                        MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
                        MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
                        MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
-       msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
 
        plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
 
 #define MDP4_VG_PHASE_STEP_DEFAULT     0x20000000
 
-int mdp4_plane_mode_set(struct drm_plane *plane,
+static int mdp4_plane_mode_set(struct drm_plane *plane,
                struct drm_crtc *crtc, struct drm_framebuffer *fb,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
        uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
        uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
 
+       if (!(crtc && fb)) {
+               DBG("%s: disabled!", mdp4_plane->name);
+               return 0;
+       }
+
        /* src values are in Q16 fixed point, convert to integer: */
        src_x = src_x >> 16;
        src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
 
-       /* TODO detach from old crtc (if we had more than one) */
-       mdp4_crtc_attach(crtc, plane);
-
        return 0;
 }
 
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
                        ARRAY_SIZE(mdp4_plane->formats));
 
        type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-       drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
-                                mdp4_plane->formats, mdp4_plane->nformats,
-                                type);
+       ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+                                mdp4_plane->formats, mdp4_plane->nformats, type);
+       if (ret)
+               goto fail;
+
+       drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
 
        mdp4_plane_install_properties(plane, &plane->base);
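
The order in which the atomic helpers drive the hooks registered
above during one plane update, as a condensation (not the helper
source; `new_state`/`old_state`/`new_fb`/`old_fb` are illustrative):

	funcs->atomic_check(plane, new_state);	/* validate the request */
	funcs->prepare_fb(plane, new_fb);	/* pin/map the incoming buffer */
	/* ... commit point ... */
	funcs->atomic_update(plane, old_state);	/* program hw from plane->state */
	funcs->cleanup_fb(plane, old_fb);	/* unpin the outgoing buffer */
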
 
index 67f4f896ba8ce99647812f28e44b54e18ec0d4ee..e87ef5512cb0ade999f44d7c2169069d7022cdc6 100644 (file)
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  23613 bytes, from 2014-07-17 15:33:30)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644 (file)
index 0000000..b0a4431
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+       int revision;
+       struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+       .name = "msm8x74",
+       .smp = {
+               .mmb_count = 22,
+               .mmb_size = 4096,
+       },
+       .ctl = {
+               .count = 5,
+               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+       },
+       .pipe_vig = {
+               .count = 3,
+               .base = { 0x01200, 0x01600, 0x01a00 },
+       },
+       .pipe_rgb = {
+               .count = 3,
+               .base = { 0x01e00, 0x02200, 0x02600 },
+       },
+       .pipe_dma = {
+               .count = 2,
+               .base = { 0x02a00, 0x02e00 },
+       },
+       .lm = {
+               .count = 5,
+               .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+               .nb_stages = 5,
+       },
+       .dspp = {
+               .count = 3,
+               .base = { 0x04600, 0x04a00, 0x04e00 },
+       },
+       .ad = {
+               .count = 2,
+               .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+       },
+       .intf = {
+               .count = 4,
+               .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+       },
+       .max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+       .name = "apq8084",
+       .smp = {
+               .mmb_count = 44,
+               .mmb_size = 8192,
+               .reserved_state[0] = GENMASK(7, 0),     /* first 8 MMBs */
+               .reserved[CID_RGB0] = 2,
+               .reserved[CID_RGB1] = 2,
+               .reserved[CID_RGB2] = 2,
+               .reserved[CID_RGB3] = 2,
+       },
+       .ctl = {
+               .count = 5,
+               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+       },
+       .pipe_vig = {
+               .count = 4,
+               .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+       },
+       .pipe_rgb = {
+               .count = 4,
+               .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+       },
+       .pipe_dma = {
+               .count = 2,
+               .base = { 0x03200, 0x03600 },
+       },
+       .lm = {
+               .count = 6,
+               .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+               .nb_stages = 5,
+       },
+       .dspp = {
+               .count = 4,
+               .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+       },
+       .ad = {
+               .count = 3,
+               .base = { 0x13500, 0x13700, 0x13900 },
+       },
+       .intf = {
+               .count = 5,
+               .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+       },
+       .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+       { .revision = 0, .config = { .hw = &msm8x74_config } },
+       { .revision = 2, .config = { .hw = &msm8x74_config } },
+       { .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+       return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+       return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+       return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+       kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+               uint32_t major, uint32_t minor)
+{
+       struct drm_device *dev = mdp5_kms->dev;
+       struct platform_device *pdev = dev->platformdev;
+       struct mdp5_cfg_handler *cfg_handler;
+       struct mdp5_cfg_platform *pconfig;
+       int i, ret = 0;
+
+       cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+       if (unlikely(!cfg_handler)) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       if (major != 1) {
+               dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+                               major, minor);
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       /* the hw can only be accessed once the global mdp5_cfg pointer is set up: */
+       for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+               if (cfg_handlers[i].revision != minor)
+                       continue;
+               mdp5_cfg = cfg_handlers[i].config.hw;
+
+               break;
+       }
+       if (unlikely(!mdp5_cfg)) {
+               dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+                               major, minor);
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       cfg_handler->revision = minor;
+       cfg_handler->config.hw = mdp5_cfg;
+
+       pconfig = mdp5_get_config(pdev);
+       memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+       DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+       return cfg_handler;
+
+fail:
+       if (cfg_handler)
+               mdp5_cfg_destroy(cfg_handler);
+
+       return ERR_PTR(ret);
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+       static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+       /* TODO */
+#endif
+       config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+       return &config;
+}
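
A sketch of the intended caller; `major`/`minor` are assumed to come
from the hardware version register, and the IS_ERR_OR_NULL check is
deliberately defensive about the failure convention:

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR_OR_NULL(mdp5_kms->cfg)) {
		ret = mdp5_kms->cfg ? PTR_ERR(mdp5_kms->cfg) : -ENXIO;
		goto fail;
	}
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
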
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644 (file)
index 0000000..dba4d52
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL                        8
+#define MAX_BASES              8
+#define MAX_SMP_BLOCKS         44
+#define MAX_CLIENTS            32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+       int count; \
+       uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+       MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+       MDP5_SUB_BLOCK_DEFINITION;
+       uint32_t nb_stages;             /* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+       int mmb_count;                  /* number of SMP MMBs */
+       int mmb_size;                   /* MMB: size in bytes */
+       mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+       int reserved[MAX_CLIENTS];      /* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+       char  *name;
+
+       struct mdp5_smp_block smp;
+       struct mdp5_sub_block ctl;
+       struct mdp5_sub_block pipe_vig;
+       struct mdp5_sub_block pipe_rgb;
+       struct mdp5_sub_block pipe_dma;
+       struct mdp5_lm_block  lm;
+       struct mdp5_sub_block dspp;
+       struct mdp5_sub_block ad;
+       struct mdp5_sub_block intf;
+
+       uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+       struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+       const struct mdp5_cfg_hw *hw;
+       struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+               uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
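
What "dynamic offsets" means for the generated header, illustrated
with a hypothetical accessor (the register name and 0x14 offset are
made up; only the lookup through the probed config is the point):

	static inline uint32_t __offset_LM(uint32_t idx)
	{
		return mdp5_cfg->lm.base[idx];	/* probed, not hard-coded */
	}

	static inline uint32_t REG_MDP5_LM_EXAMPLE(uint32_t lm)
	{
		return 0x00000014 + __offset_LM(lm);
	}
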
index ebe2e60f3ab1147826e49a79b9772f4115635040..0e9a2e3a82d76e1e104fd1e136d8924755706586 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
 
 #include "mdp5_kms.h"
 
+#include <linux/sort.h>
 #include <drm/drm_mode.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_flip_work.h"
 
+#define SSPP_MAX       (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
 struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];
-       struct drm_plane *plane;
-       struct drm_plane *planes[8];
        int id;
        bool enabled;
 
-       /* which mixer/encoder we route output to: */
-       int mixer;
+       /* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id)     (((crtc_id) == 3) ? 5 : (crtc_id))
+       int lm;
+       spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */
+
+       /* CTL used for this CRTC: */
+       struct mdp5_ctl *ctl;
 
        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
-       struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
        atomic_t pending;
 
-       /* the fb that we logically (from PoV of KMS API) hold a ref
-        * to.  Which we may not yet be scanning out (we may still
-        * be scanning out previous in case of page_flip while waiting
-        * for gpu rendering to complete:
-        */
-       struct drm_framebuffer *fb;
-
-       /* the fb that we currently hold a scanout ref to: */
-       struct drm_framebuffer *scanout_fb;
-
-       /* for unref'ing framebuffers after scanout completes: */
-       struct drm_flip_work unref_fb_work;
-
        struct mdp_irq vblank;
        struct mdp_irq err;
 };
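
Worked mapping for GET_LM_ID() above; the jump from crtc 3 to lm 5
presumably skips mixers reserved for other paths (the usage line is
illustrative):

	/* crtc 0 -> lm 0, crtc 1 -> lm 1, crtc 2 -> lm 2, crtc 3 -> lm 5 */
	mdp5_crtc->lm = GET_LM_ID(mdp5_crtc->id);
	spin_lock_init(&mdp5_crtc->lm_lock);
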
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-       struct mdp5_kms *mdp5_kms = get_kms(crtc);
-       int id = mdp5_crtc->id;
-       uint32_t i, flush = 0;
-
-       for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
-               struct drm_plane *plane = mdp5_crtc->planes[i];
-               if (plane) {
-                       enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
-                       flush |= pipe2flush(pipe);
-               }
-       }
-       flush |= mixer2flush(mdp5_crtc->id);
-       flush |= MDP5_CTL_FLUSH_CTL;
-
-       DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
-}
+#define mdp5_lm_get_flush(lm)  mdp_ctl_flush_mask_lm(lm)
 
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-       struct drm_framebuffer *old_fb = mdp5_crtc->fb;
-
-       /* grab reference to incoming scanout fb: */
-       drm_framebuffer_reference(new_fb);
-       mdp5_crtc->base.primary->fb = new_fb;
-       mdp5_crtc->fb = new_fb;
 
-       if (old_fb)
-               drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+       DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+       mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
 }
 
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this.  Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
+/*
+ * Flush updates, to make sure hw is updated to the new scanout fb,
+ * so that we can safely queue unref of the current fb (ie. on the
+ * next vblank we know hw is done with the previous scanout fb).
+ */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+static void crtc_flush_all(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct drm_plane *plane;
+       uint32_t flush_mask = 0;
 
-       /* flush updates, to make sure hw is updated to new scanout fb,
-        * so that we can safely queue unref to current fb (ie. next
-        * vblank we know hw is done w/ previous scanout_fb).
-        */
-       crtc_flush(crtc);
-
-       if (mdp5_crtc->scanout_fb)
-               drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
-                               mdp5_crtc->scanout_fb);
+       /* we could have already released CTL in the disable path: */
+       if (!mdp5_crtc->ctl)
+               return;
 
-       mdp5_crtc->scanout_fb = fb;
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               flush_mask |= mdp5_plane_get_flush(plane);
+       }
+       flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+       flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-       /* enable vblank to complete flip: */
-       request_pending(crtc, PENDING_FLIP);
+       crtc_flush(crtc, flush_mask);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
-       unsigned long flags, i;
+       struct drm_plane *plane;
+       unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
+                       DBG("%s: send event: %p", mdp5_crtc->name, event);
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
-               struct drm_plane *plane = mdp5_crtc->planes[i];
-               if (plane)
-                       mdp5_plane_complete_flip(plane);
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               mdp5_plane_complete_flip(plane);
        }
 }
 
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-       struct mdp5_crtc *mdp5_crtc =
-               container_of(cb, struct mdp5_crtc, pageflip_cb);
-       struct drm_crtc *crtc = &mdp5_crtc->base;
-       struct drm_framebuffer *fb = mdp5_crtc->fb;
-
-       if (!fb)
-               return;
-
-       drm_framebuffer_reference(fb);
-       mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
-       update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-       struct mdp5_crtc *mdp5_crtc =
-               container_of(work, struct mdp5_crtc, unref_fb_work);
-       struct drm_device *dev = mdp5_crtc->base.dev;
-
-       mutex_lock(&dev->mode_config.mutex);
-       drm_framebuffer_unreference(val);
-       mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
        drm_crtc_cleanup(crtc);
-       drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
 
        kfree(mdp5_crtc);
 }
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
                        mdp5_enable(mdp5_kms);
                        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
                } else {
+                       /* set STAGE_UNUSED for all layers */
+                       mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
                        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
                        mdp5_disable(mdp5_kms);
                }
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane is exactly
+ * the CRTC resolution.
+ */
 static void blend_setup(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
-       int id = mdp5_crtc->id;
+       struct drm_plane *plane;
+       const struct mdp5_cfg_hw *hw_cfg;
+       uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+       unsigned long flags;
+#define blender(stage) ((stage) - STAGE_BASE)
 
-       /*
-        * Hard-coded setup for now until I figure out how the
-        * layer-mixer works
-        */
+       hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-       /* LM[id]: */
-       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
-                       MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
-       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
-                       MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-                       MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
-                       MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
-       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
-       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
-
-       /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
-        * we want to be setting CTL[m].LAYER[n].  Not sure what the
-        * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
-        * used when chaining up mixers for high resolution displays?
-        */
+       spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+       /* ctl could be released already when we are shutting down: */
+       if (!mdp5_crtc->ctl)
+               goto out;
 
-       /* CTL[id]: */
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
-                       MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
-                       MDP5_CTL_LAYER_REG_BORDER_COLOR);
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               enum mdp_mixer_stage_id stage =
+                       to_mdp5_plane_state(plane->state)->stage;
+
+               /*
+                * Note: This cannot happen with current implementation but
+                * we need to check this condition once z property is added
+                */
+               BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+               /* LM */
+               mdp5_write(mdp5_kms,
+                               REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+                               MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+                               MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+               mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+                               blender(stage)), 0xff);
+               mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+                               blender(stage)), 0x00);
+               /* CTL */
+               blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+               DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+                               pipe2name(mdp5_plane_pipe(plane)), stage);
+       }
+
+       DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+       mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+       spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
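
Worked example of the CTL word assembled above: for a single private
RGB0 pipe staged at the base layer the loop reduces to one mask
(compare the removed hard-coded write, which pinned RGB0 at STAGE0
with border color enabled):

	blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE);
	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
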
 
-static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode,
-               int x, int y,
-               struct drm_framebuffer *old_fb)
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
-       int ret;
+       unsigned long flags;
+       struct drm_display_mode *mode;
 
-       mode = adjusted_mode;
+       if (WARN_ON(!crtc->state))
+               return;
+
+       mode = &crtc->state->adjusted_mode;
 
        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);
 
-       /* grab extra ref for update_scanout() */
-       drm_framebuffer_reference(crtc->primary->fb);
-
-       ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-       if (ret) {
-               drm_framebuffer_unreference(crtc->primary->fb);
-               dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-                               mdp5_crtc->name, ret);
-               return ret;
-       }
-
-       mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+       spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+       mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
-
-       update_fb(crtc, crtc->primary->fb);
-       update_scanout(crtc, crtc->primary->fb);
-
-       return 0;
+       spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
 static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
 
 static void mdp5_crtc_commit(struct drm_crtc *crtc)
 {
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       DBG("%s", mdp5_crtc->name);
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-       crtc_flush(crtc);
+       crtc_flush_all(crtc);
        /* drop the ref to mdp clk's that we got in prepare: */
        mdp5_disable(get_kms(crtc));
 }
 
-static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-               struct drm_framebuffer *old_fb)
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+struct plane_state {
+       struct drm_plane *plane;
+       struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+       struct plane_state *pa = (struct plane_state *)a;
+       struct plane_state *pb = (struct plane_state *)b;
+       return pa->state->zpos - pb->state->zpos;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-       struct drm_plane *plane = mdp5_crtc->plane;
-       struct drm_display_mode *mode = &crtc->mode;
-       int ret;
-
-       /* grab extra ref for update_scanout() */
-       drm_framebuffer_reference(crtc->primary->fb);
-
-       ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-       if (ret) {
-               drm_framebuffer_unreference(crtc->primary->fb);
-               return ret;
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       struct drm_plane *plane;
+       struct drm_device *dev = crtc->dev;
+       struct plane_state pstates[STAGE3 + 1];
+       int cnt = 0, i;
+
+       DBG("%s: check", mdp5_crtc->name);
+
+       if (mdp5_crtc->event) {
+               dev_err(dev->dev, "already pending flip!\n");
+               return -EBUSY;
        }
 
-       update_fb(crtc, crtc->primary->fb);
-       update_scanout(crtc, crtc->primary->fb);
+       /* request a free CTL, if none is already allocated for this CRTC */
+       if (state->enable && !mdp5_crtc->ctl) {
+               mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
+               if (WARN_ON(!mdp5_crtc->ctl))
+                       return -EINVAL;
+       }
+
+       /* verify that there are not too many planes attached to crtc
+        * and that we don't have conflicting mixer stages:
+        */
+       drm_atomic_crtc_state_for_each_plane(plane, state) {
+               struct drm_plane_state *pstate;
+
+               if (cnt >= ARRAY_SIZE(pstates)) {
+                       dev_err(dev->dev, "too many planes!\n");
+                       return -EINVAL;
+               }
+
+               pstate = state->state->plane_states[drm_plane_index(plane)];
+
+               /* plane might not have changed, in which case take
+                * current state:
+                */
+               if (!pstate)
+                       pstate = plane->state;
+
+               pstates[cnt].plane = plane;
+               pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+               cnt++;
+       }
+
+       sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+       for (i = 0; i < cnt; i++) {
+               pstates[i].state->stage = STAGE_BASE + i;
+               DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
+                               pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+                               pstates[i].state->stage);
+       }
 
        return 0;
 }
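
To make the zpos-to-stage assignment above concrete, here is a small self-contained userspace sketch of the same sort-then-assign pattern (qsort() standing in for the kernel's sort(); plane names and zpos values are made up):

#include <stdio.h>
#include <stdlib.h>

struct pstate { const char *pipe; int zpos; int stage; };

static int cmp_zpos(const void *a, const void *b)
{
        return ((const struct pstate *)a)->zpos -
               ((const struct pstate *)b)->zpos;
}

int main(void)
{
        /* hypothetical planes: primary (zpos 0) plus two public planes */
        struct pstate ps[] = {
                { "VIG0", 5, 0 }, { "RGB0", 0, 0 }, { "VIG1", 2, 0 },
        };
        int i;

        qsort(ps, 3, sizeof(ps[0]), cmp_zpos);
        for (i = 0; i < 3; i++) {
                ps[i].stage = i;  /* STAGE_BASE + i in the driver */
                printf("%s: zpos=%d -> stage=%d\n",
                       ps[i].pipe, ps[i].zpos, ps[i].stage);
        }
        return 0;
}
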
 
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       DBG("%s: begin", mdp5_crtc->name);
 }
 
-static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
-               struct drm_framebuffer *new_fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *obj;
        unsigned long flags;
 
-       if (mdp5_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
+       DBG("%s: flush", mdp5_crtc->name);
 
-       obj = msm_framebuffer_bo(new_fb, 0);
+       WARN_ON(mdp5_crtc->event);
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       mdp5_crtc->event = event;
+       mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       update_fb(crtc, new_fb);
+       blend_setup(crtc);
+       crtc_flush_all(crtc);
+       request_pending(crtc, PENDING_FLIP);
 
-       return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+       if (mdp5_crtc->ctl && !crtc->state->enable) {
+               mdp5_ctl_release(mdp5_crtc->ctl);
+               mdp5_crtc->ctl = NULL;
+       }
 }
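
For context, the atomic helpers drive these hooks in a fixed order per commit; a rough sketch of the flow as it applies to this driver (helper internals elided):

/*
 * Per atomic commit (sketch, assuming the drm_atomic_helper flow):
 *
 *   mdp5_crtc_atomic_check()   - validate state, assign mixer stages
 *   mdp5_crtc_atomic_begin()   - currently a no-op
 *   <plane updates>            - pipes programmed by the plane hooks
 *   mdp5_crtc_atomic_flush()   - blend_setup() + crtc_flush_all(),
 *                                then the PENDING_FLIP event completes
 *                                from the vblank irq
 */
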
 
 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
+       .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
-       .page_flip = mdp5_crtc_page_flip,
+       .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp5_crtc_set_property,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .dpms = mdp5_crtc_dpms,
        .mode_fixup = mdp5_crtc_mode_fixup,
-       .mode_set = mdp5_crtc_mode_set,
+       .mode_set_nofb = mdp5_crtc_mode_set_nofb,
+       .mode_set = drm_helper_crtc_mode_set,
+       .mode_set_base = drm_helper_crtc_mode_set_base,
        .prepare = mdp5_crtc_prepare,
        .commit = mdp5_crtc_commit,
-       .mode_set_base = mdp5_crtc_mode_set_base,
        .load_lut = mdp5_crtc_load_lut,
+       .atomic_check = mdp5_crtc_atomic_check,
+       .atomic_begin = mdp5_crtc_atomic_begin,
+       .atomic_flush = mdp5_crtc_atomic_flush,
 };
 
 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
-       struct msm_drm_private *priv = crtc->dev->dev_private;
        unsigned pending;
 
        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
-               drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
        }
 }
 
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
-       struct drm_crtc *crtc = &mdp5_crtc->base;
+
        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
-       crtc_flush(crtc);
 }
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
-       static const enum mdp5_intfnum intfnum[] = {
-                       INTF0, INTF1, INTF2, INTF3,
-       };
+       uint32_t flush_mask = 0;
        uint32_t intf_sel;
+       unsigned long flags;
 
        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
        if (!mdp5_kms)
                return;
 
+       spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 
        switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                break;
        }
 
-       blend_setup(crtc);
+       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+       spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
        DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+       mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+       flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+       flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
-       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
-                       MDP5_CTL_OP_MODE(MODE_NONE) |
-                       MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
-
-       crtc_flush(crtc);
+       crtc_flush(crtc, flush_mask);
 }
 
-static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
-               struct drm_plane *plane)
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
-       BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+       if (WARN_ON(!crtc))
+               return -EINVAL;
 
-       if (mdp5_crtc->planes[pipe_id] == plane)
-               return;
-
-       mdp5_crtc->planes[pipe_id] = plane;
-       blend_setup(crtc);
-       if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
-               crtc_flush(crtc);
-}
-
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       set_attach(crtc, mdp5_plane_pipe(plane), plane);
-}
-
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       /* don't actually detatch our primary plane: */
-       if (to_mdp5_crtc(crtc)->plane == plane)
-               return;
-       set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+       return mdp5_crtc->lm;
 }
 
 /* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 {
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;
-       int ret;
 
        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
-       if (!mdp5_crtc) {
-               ret = -ENOMEM;
-               goto fail;
-       }
+       if (!mdp5_crtc)
+               return ERR_PTR(-ENOMEM);
 
        crtc = &mdp5_crtc->base;
 
-       mdp5_crtc->plane = plane;
        mdp5_crtc->id = id;
+       mdp5_crtc->lm = GET_LM_ID(id);
+
+       spin_lock_init(&mdp5_crtc->lm_lock);
 
        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
        snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
                        pipe2name(mdp5_plane_pipe(plane)), id);
 
-       ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
-                       "unref fb", unref_fb_worker);
-       if (ret)
-               goto fail;
-
-       INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+       plane->crtc = crtc;
 
-       mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+       mdp5_plane_install_properties(plane, &crtc->base);
 
        return crtc;
-
-fail:
-       if (crtc)
-               mdp5_crtc_destroy(crtc);
-
-       return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644 (file)
index 0000000..dea4505
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all CRTCs.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths.
+ *
+ * In certain use cases (high-resolution dual pipe), a single CTL can be
+ * shared across multiple CRTCs.
+ *
+ * Because the number of CTLs can be less than the number of CRTCs,
+ * CTLs are dynamically allocated from a pool, only once a CTL is
+ * requested for a CRTC (in mdp5_crtc_atomic_check()).
+ */
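
A minimal usage sketch of the pool API (per the prototypes in mdp5_ctl.h below; the surrounding ctlm/crtc/intf/lm/blend_cfg variables are assumed, error handling trimmed):

/* allocate a CTL for a CRTC, program it, flush, and give it back */
struct mdp5_ctl *ctl = mdp5_ctlm_request(ctlm, crtc);

if (ctl) {
        mdp5_ctl_set_intf(ctl, intf);
        mdp5_ctl_blend(ctl, lm, blend_cfg);
        mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl));
        /* ... once the CRTC is disabled: */
        mdp5_ctl_release(ctl);
}
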
+
+struct mdp5_ctl {
+       struct mdp5_ctl_manager *ctlm;
+
+       u32 id;
+
+       /* whether this CTL has been allocated or not: */
+       bool busy;
+
+       /* memory output connection (@see mdp5_ctl_mode): */
+       u32 mode;
+
+       /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+       spinlock_t hw_lock;
+       u32 reg_offset;
+
+       /* flush mask used to commit CTL registers */
+       u32 flush_mask;
+
+       bool cursor_on;
+
+       struct drm_crtc *crtc;
+};
+
+struct mdp5_ctl_manager {
+       struct drm_device *dev;
+
+       /* number of CTL / Layer Mixers in this hw config: */
+       u32 nlm;
+       u32 nctl;
+
+       /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+       spinlock_t pool_lock;
+       struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+       struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+       return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+       (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+       mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+       (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
+       return mdp5_read(mdp5_kms, reg);
+}
+
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
+{
+       unsigned long flags;
+       static const enum mdp5_intfnum intfnum[] = {
+                       INTF0, INTF1, INTF2, INTF3,
+       };
+
+       spin_lock_irqsave(&ctl->hw_lock, flags);
+       ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
+                       MDP5_CTL_OP_MODE(ctl->mode) |
+                       MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+       spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+       return 0;
+}
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+{
+       struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+       unsigned long flags;
+       u32 blend_cfg;
+       int lm;
+
+       lm = mdp5_crtc_get_lm(ctl->crtc);
+       if (unlikely(WARN_ON(lm < 0))) {
+               dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+                               ctl->id, lm);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&ctl->hw_lock, flags);
+
+       blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+
+       if (enable)
+               blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+       else
+               blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+       ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+
+       spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+       ctl->cursor_on = enable;
+
+       return 0;
+}
+
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+{
+       unsigned long flags;
+
+       if (ctl->cursor_on)
+               blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+       else
+               blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+       spin_lock_irqsave(&ctl->hw_lock, flags);
+       ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+       spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+       return 0;
+}
+
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+       struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+       unsigned long flags;
+
+       if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
+               int lm = mdp5_crtc_get_lm(ctl->crtc);
+
+               if (unlikely(WARN_ON(lm < 0))) {
+                       dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+                                       ctl->id, lm);
+                       return -EINVAL;
+               }
+
+               /* for current targets, cursor bit is the same as LM bit */
+               flush_mask |= mdp_ctl_flush_mask_lm(lm);
+       }
+
+       spin_lock_irqsave(&ctl->hw_lock, flags);
+       ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+       spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+       return 0;
+}
+
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
+{
+       return ctl->flush_mask;
+}
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl)
+{
+       struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+       unsigned long flags;
+
+       if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
+               dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
+                               ctl->id, ctl->busy);
+               return;
+       }
+
+       spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+       ctl->busy = false;
+       spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+
+       DBG("CTL %d released", ctl->id);
+}
+
+/*
+ * mdp5_ctlm_request() - CTL dynamic allocation
+ *
+ * Note: the current implementation assumes only one CRTC per CTL
+ *
+ * @return first free CTL
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+               struct drm_crtc *crtc)
+{
+       struct mdp5_ctl *ctl = NULL;
+       unsigned long flags;
+       int c;
+
+       spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+       for (c = 0; c < ctl_mgr->nctl; c++)
+               if (!ctl_mgr->ctls[c].busy)
+                       break;
+
+       if (unlikely(c >= ctl_mgr->nctl)) {
+               dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+               goto unlock;
+       }
+
+       ctl = &ctl_mgr->ctls[c];
+
+       ctl->crtc = crtc;
+       ctl->busy = true;
+       DBG("CTL %d allocated", ctl->id);
+
+unlock:
+       spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+       return ctl;
+}
+
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
+{
+       unsigned long flags;
+       int c;
+
+       for (c = 0; c < ctl_mgr->nctl; c++) {
+               struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+               spin_lock_irqsave(&ctl->hw_lock, flags);
+               ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+               spin_unlock_irqrestore(&ctl->hw_lock, flags);
+       }
+}
+
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
+{
+       kfree(ctl_mgr);
+}
+
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+               void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+{
+       struct mdp5_ctl_manager *ctl_mgr;
+       const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+       unsigned long flags;
+       int c, ret;
+
+       ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+       if (!ctl_mgr) {
+               dev_err(dev->dev, "failed to allocate CTL manager\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
+               dev_err(dev->dev, "Increase static pool size to at least %d\n",
+                               ctl_cfg->count);
+               ret = -ENOSPC;
+               goto fail;
+       }
+
+       /* initialize the CTL manager: */
+       ctl_mgr->dev = dev;
+       ctl_mgr->nlm = hw_cfg->lm.count;
+       ctl_mgr->nctl = ctl_cfg->count;
+       spin_lock_init(&ctl_mgr->pool_lock);
+
+       /* initialize each CTL of the pool: */
+       spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+       for (c = 0; c < ctl_mgr->nctl; c++) {
+               struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+               if (WARN_ON(!ctl_cfg->base[c])) {
+                       dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               ctl->ctlm = ctl_mgr;
+               ctl->id = c;
+               ctl->mode = MODE_NONE;
+               ctl->reg_offset = ctl_cfg->base[c];
+               ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
+               ctl->busy = false;
+               spin_lock_init(&ctl->hw_lock);
+       }
+       spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+       DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+       return ctl_mgr;
+
+fail:
+       if (ctl_mgr)
+               mdp5_ctlm_destroy(ctl_mgr);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644 (file)
index 0000000..1018519
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+               void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
+
+/* @blend_cfg: see LM blender config definition below */
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
+
+/*
+ * blend_cfg (LM blender config):
+ *
+ * The function below allows the caller of mdp5_ctl_blend() to specify how
+ * pipes are blended according to their stage (z-order), via the @blend_cfg arg.
+ */
+static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+               enum mdp_mixer_stage_id stage)
+{
+       switch (pipe) {
+       case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+       case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+       case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+       case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+       case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+       case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+       case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+       case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+       case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+       case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+       default:        return 0;
+       }
+}
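
For example, stacking an RGB pipe at the base stage with a VIG pipe above it would build the register value like so (a sketch; STAGE0 is assumed from the mdp_mixer_stage_id enum, ctl/lm from the caller):

u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
                mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);

mdp5_ctl_blend(ctl, lm, blend_cfg);
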
+
+/*
+ * flush_mask (CTL flush masks):
+ *
+ * The following functions allow each DRM entity to get and store
+ * its own flush mask.
+ * Once stored, these masks are accessed through each entity's
+ * interface and used by the caller of mdp5_ctl_commit() to specify
+ * which block(s) need to be flushed through the @flush_mask parameter.
+ */
+
+#define MDP5_CTL_FLUSH_CURSOR_DUMMY    0x80000000
+
+static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+       /* TODO: use id once multiple cursor support is present */
+       (void)cursor_id;
+
+       return MDP5_CTL_FLUSH_CURSOR_DUMMY;
+}
+
+static inline u32 mdp_ctl_flush_mask_lm(int lm)
+{
+       switch (lm) {
+       case 0:  return MDP5_CTL_FLUSH_LM0;
+       case 1:  return MDP5_CTL_FLUSH_LM1;
+       case 2:  return MDP5_CTL_FLUSH_LM2;
+       case 5:  return MDP5_CTL_FLUSH_LM5;
+       default: return 0;
+       }
+}
+
+static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+       switch (pipe) {
+       case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+       case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+       case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+       case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+       case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+       case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+       case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+       case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+       case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+       case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+       default:        return 0;
+       }
+}
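
Putting these together, a CRTC commit ORs the mask of every block it touched into one flush (a sketch along the lines of this patch's crtc_flush_all(); plane/lm/ctl are assumed from the caller):

u32 flush_mask = 0;

flush_mask |= mdp5_plane_get_flush(plane);      /* each attached pipe */
flush_mask |= mdp_ctl_flush_mask_lm(lm);        /* the layer mixer */
flush_mask |= mdp5_ctl_get_flush(ctl);          /* the CTL itself */

mdp5_ctl_commit(ctl, flush_mask);
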
+
+#endif /* __MDP5_CTL_H__ */
index edec7bfaa952310b34216c362a35b61adeba5c4e..0254bfdeb92feb1b9aa3ccbab03c20c447877947 100644 (file)
@@ -24,6 +24,7 @@ struct mdp5_encoder {
        struct drm_encoder base;
        int intf;
        enum mdp5_intf intf_id;
+       spinlock_t intf_lock;   /* protect REG_MDP5_INTF_* registers */
        bool enabled;
        uint32_t bsc;
 };
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct mdp5_kms *mdp5_kms = get_kms(encoder);
        int intf = mdp5_encoder->intf;
        bool enabled = (mode == DRM_MODE_DPMS_ON);
+       unsigned long flags;
 
        DBG("mode=%d", mode);
 
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
 
        if (enabled) {
                bs_set(mdp5_encoder, 1);
+               spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
                mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+               spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
        } else {
+               spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
                mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+               spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+               /*
+                * Wait for a vsync so we know the ENABLE=0 latched before
+                * the (connector) source of the vsyncs gets disabled,
+                * otherwise we end up in a funny state if we re-enable
+                * before the disable latches, with the result that some
+                * of the settings for the new modeset (like the new
+                * scanout buffer) don't latch properly.
+                */
+               mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+
                bs_set(mdp5_encoder, 0);
        }
 
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
        uint32_t display_v_start, display_v_end;
        uint32_t hsync_start_x, hsync_end_x;
        uint32_t format;
+       unsigned long flags;
 
        mode = adjusted_mode;
 
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
        display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
        display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
 
+       spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
        mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
                        MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
                        MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
        mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3);  /* frame+line? */
+
+       spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 }
 
 static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
        mdp5_encoder->intf_id = intf_id;
        encoder = &mdp5_encoder->base;
 
+       spin_lock_init(&mdp5_encoder->intf_lock);
+
        drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
index f2b985bc2adf41f8330dd4bfb8dcafdb30b43c53..70ac81edd40f3bb7d6de68553b7be350005312ec 100644 (file)
@@ -15,6 +15,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
 
 #include "msm_drv.h"
 #include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
 
        VERB("intr=%08x", intr);
 
-       if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+       if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
                mdp5_irq_mdp(mdp_kms);
+               intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+       }
 
-       if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
-               hdmi_irq(0, mdp5_kms->hdmi);
+       while (intr) {
+               irq_hw_number_t hwirq = fls(intr) - 1;
+               generic_handle_irq(irq_find_mapping(
+                               mdp5_kms->irqcontroller.domain, hwirq));
+               intr &= ~(1 << hwirq);
+       }
 
        return IRQ_HANDLED;
 }
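
The new loop above peels pending bits highest-first via fls(); a self-contained demo of the same pattern (fls() open-coded with a compiler builtin, status value made up):

#include <stdio.h>

static int fls_demo(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int intr = 0x00000110;  /* two pending hwirqs: 8 and 4 */

        while (intr) {
                int hwirq = fls_demo(intr) - 1;

                printf("dispatch hwirq %d\n", hwirq);  /* 8, then 4 */
                intr &= ~(1u << hwirq);
        }
        return 0;
}
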
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
        mdp_update_vblank_mask(to_mdp_kms(kms),
                        mdp5_crtc_vblank(crtc), false);
 }
+
+/*
+ * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
+ * can register to get their irqs delivered
+ */
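
With the domain in place, a sub-block driver can take its interrupt through the standard kernel APIs; a hedged sketch (the DT wiring and variable names here are assumptions, not part of this patch):

/* in the sub-block's probe, assuming its of_node lists the MDP5
 * hwirq in an interrupts property resolved via this domain: */
unsigned int irq = irq_of_parse_and_map(pdev->dev.of_node, 0);

if (!irq)
        return -ENXIO;

ret = devm_request_irq(&pdev->dev, irq, hdmi_irq, 0, "hdmi_isr", hdmi);
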
+
+#define VALID_IRQS  (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
+               MDP5_HW_INTR_STATUS_INTR_DSI1 | \
+               MDP5_HW_INTR_STATUS_INTR_HDMI | \
+               MDP5_HW_INTR_STATUS_INTR_EDP)
+
+static void mdp5_hw_mask_irq(struct irq_data *irqd)
+{
+       struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+       smp_mb__before_atomic();
+       clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static void mdp5_hw_unmask_irq(struct irq_data *irqd)
+{
+       struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+       smp_mb__before_atomic();
+       set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static struct irq_chip mdp5_hw_irq_chip = {
+       .name           = "mdp5",
+       .irq_mask       = mdp5_hw_mask_irq,
+       .irq_unmask     = mdp5_hw_unmask_irq,
+};
+
+static int mdp5_hw_irqdomain_map(struct irq_domain *d,
+               unsigned int irq, irq_hw_number_t hwirq)
+{
+       struct mdp5_kms *mdp5_kms = d->host_data;
+
+       if (!(VALID_IRQS & (1 << hwirq)))
+               return -EPERM;
+
+       irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, mdp5_kms);
+       set_irq_flags(irq, IRQF_VALID);
+
+       return 0;
+}
+
+static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
+       .map = mdp5_hw_irqdomain_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
+{
+       struct device *dev = mdp5_kms->dev->dev;
+       struct irq_domain *d;
+
+       d = irq_domain_add_linear(dev->of_node, 32,
+                       &mdp5_hw_irqdomain_ops, mdp5_kms);
+       if (!d) {
+               dev_err(dev, "mdp5 irq domain add failed\n");
+               return -ENXIO;
+       }
+
+       mdp5_kms->irqcontroller.enabled_mask = 0;
+       mdp5_kms->irqcontroller.domain = d;
+
+       return 0;
+}
+
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
+{
+       if (mdp5_kms->irqcontroller.domain) {
+               irq_domain_remove(mdp5_kms->irqcontroller.domain);
+               mdp5_kms->irqcontroller.domain = NULL;
+       }
+}
index 31a2c6331a1d5a47c57bb36e1a30482b67615e50..a11f1b80c488567d44755ad68eadc4d15dc61375 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
                "mdp_0",
 };
 
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-
-const struct mdp5_config *mdp5_cfg;
-
-static const struct mdp5_config msm8x74_config = {
-       .name = "msm8x74",
-       .ctl = {
-               .count = 5,
-               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
-       },
-       .pipe_vig = {
-               .count = 3,
-               .base = { 0x01200, 0x01600, 0x01a00 },
-       },
-       .pipe_rgb = {
-               .count = 3,
-               .base = { 0x01e00, 0x02200, 0x02600 },
-       },
-       .pipe_dma = {
-               .count = 2,
-               .base = { 0x02a00, 0x02e00 },
-       },
-       .lm = {
-               .count = 5,
-               .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
-       },
-       .dspp = {
-               .count = 3,
-               .base = { 0x04600, 0x04a00, 0x04e00 },
-       },
-       .ad = {
-               .count = 2,
-               .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
-       },
-       .intf = {
-               .count = 4,
-               .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
-       },
-};
-
-static const struct mdp5_config apq8084_config = {
-       .name = "apq8084",
-       .ctl = {
-               .count = 5,
-               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
-       },
-       .pipe_vig = {
-               .count = 4,
-               .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
-       },
-       .pipe_rgb = {
-               .count = 4,
-               .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
-       },
-       .pipe_dma = {
-               .count = 2,
-               .base = { 0x03200, 0x03600 },
-       },
-       .lm = {
-               .count = 6,
-               .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
-       },
-       .dspp = {
-               .count = 4,
-               .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
-
-       },
-       .ad = {
-               .count = 3,
-               .base = { 0x13500, 0x13700, 0x13900 },
-       },
-       .intf = {
-               .count = 5,
-               .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
-       },
-};
-
-struct mdp5_config_entry {
-       int revision;
-       const struct mdp5_config *config;
-};
-
-static const struct mdp5_config_entry mdp5_configs[] = {
-       { .revision = 0, .config = &msm8x74_config },
-       { .revision = 2, .config = &msm8x74_config },
-       { .revision = 3, .config = &apq8084_config },
-};
-
-static int mdp5_select_hw_cfg(struct msm_kms *kms)
-{
-       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_device *dev = mdp5_kms->dev;
-       uint32_t version, major, minor;
-       int i, ret = 0;
-
-       mdp5_enable(mdp5_kms);
-       version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
-       mdp5_disable(mdp5_kms);
-
-       major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
-       minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
-
-       DBG("found MDP5 version v%d.%d", major, minor);
-
-       if (major != 1) {
-               dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
-                               major, minor);
-               ret = -ENXIO;
-               goto out;
-       }
-
-       mdp5_kms->rev = minor;
-
-       /* only after mdp5_cfg global pointer's init can we access the hw */
-       for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
-               if (mdp5_configs[i].revision != minor)
-                       continue;
-               mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
-               break;
-       }
-       if (unlikely(!mdp5_kms->hw_cfg)) {
-               dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
-                               major, minor);
-               ret = -ENXIO;
-               goto out;
-       }
-
-       DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
-
-       return 0;
-out:
-       return ret;
-}
-
 static int mdp5_hw_init(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        struct drm_device *dev = mdp5_kms->dev;
-       int i;
+       unsigned long flags;
 
        pm_runtime_get_sync(dev->dev);
 
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
         * care.
         */
 
+       spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+       spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
-       for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
-               mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+       mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
 
        pm_runtime_put_sync(dev->dev);
 
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        struct msm_mmu *mmu = mdp5_kms->mmu;
 
+       mdp5_irq_domain_fini(mdp5_kms);
+
        if (mmu) {
                mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
                mmu->funcs->destroy(mmu);
        }
+
+       if (mdp5_kms->ctlm)
+               mdp5_ctlm_destroy(mdp5_kms->ctlm);
+       if (mdp5_kms->smp)
+               mdp5_smp_destroy(mdp5_kms->smp);
+       if (mdp5_kms->cfg)
+               mdp5_cfg_destroy(mdp5_kms->cfg);
+
        kfree(mdp5_kms);
 }
 
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
        static const enum mdp5_pipe crtcs[] = {
                        SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
        };
+       static const enum mdp5_pipe pub_planes[] = {
+                       SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+       };
        struct drm_device *dev = mdp5_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_encoder *encoder;
+       const struct mdp5_cfg_hw *hw_cfg;
        int i, ret;
 
-       /* construct CRTCs: */
-       for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
+       hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+       /* register our interrupt-controller for hdmi/eDP/dsi/etc
+        * to use for irqs routed through mdp:
+        */
+       ret = mdp5_irq_domain_init(mdp5_kms);
+       if (ret)
+               goto fail;
+
+       /* construct CRTCs and their private planes: */
+       for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
                struct drm_plane *plane;
                struct drm_crtc *crtc;
 
-               plane = mdp5_plane_init(dev, crtcs[i], true);
+               plane = mdp5_plane_init(dev, crtcs[i], true,
+                               hw_cfg->pipe_rgb.base[i]);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                priv->crtcs[priv->num_crtcs++] = crtc;
        }
 
+       /* Construct public planes: */
+       for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
+               struct drm_plane *plane;
+
+               plane = mdp5_plane_init(dev, pub_planes[i], false,
+                               hw_cfg->pipe_vig.base[i]);
+               if (IS_ERR(plane)) {
+                       ret = PTR_ERR(plane);
+                       dev_err(dev->dev, "failed to construct %s plane: %d\n",
+                                       pipe2name(pub_planes[i]), ret);
+                       goto fail;
+               }
+       }
+
        /* Construct encoder for HDMI: */
        encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
        if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
        priv->encoders[priv->num_encoders++] = encoder;
 
        /* Construct bridge/connector for HDMI: */
-       mdp5_kms->hdmi = hdmi_init(dev, encoder);
-       if (IS_ERR(mdp5_kms->hdmi)) {
-               ret = PTR_ERR(mdp5_kms->hdmi);
-               dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
-               goto fail;
+       if (priv->hdmi) {
+               ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+               if (ret) {
+                       dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+                       goto fail;
+               }
        }
 
        return 0;
@@ -337,6 +244,21 @@ fail:
        return ret;
 }
 
+static void read_hw_revision(struct mdp5_kms *mdp5_kms,
+               uint32_t *major, uint32_t *minor)
+{
+       uint32_t version;
+
+       mdp5_enable(mdp5_kms);
+       version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+       mdp5_disable(mdp5_kms);
+
+       *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+       *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+       DBG("MDP5 version v%d.%d", *major, *minor);
+}
+
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name)
 {
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 {
        struct platform_device *pdev = dev->platformdev;
-       struct mdp5_platform_config *config = mdp5_get_config(pdev);
+       struct mdp5_cfg *config;
        struct mdp5_kms *mdp5_kms;
        struct msm_kms *kms = NULL;
        struct msm_mmu *mmu;
+       uint32_t major, minor;
        int i, ret;
 
        mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                goto fail;
        }
 
+       spin_lock_init(&mdp5_kms->resource_lock);
+
        mdp_kms_init(&mdp5_kms->base, &kms_funcs);
 
        kms = &mdp5_kms->base.base;
 
        mdp5_kms->dev = dev;
-       mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
 
        mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
        if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        if (ret)
                goto fail;
 
-       ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+       /* we need to set a default rate before enabling.  Set a safe
+        * rate first, then figure out the hw revision, and then set a
+        * more optimal rate:
+        */
+       clk_set_rate(mdp5_kms->src_clk, 200000000);
+
+       read_hw_revision(mdp5_kms, &major, &minor);
 
-       ret = mdp5_select_hw_cfg(kms);
-       if (ret)
+       mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+       if (IS_ERR(mdp5_kms->cfg)) {
+               ret = PTR_ERR(mdp5_kms->cfg);
+               mdp5_kms->cfg = NULL;
                goto fail;
+       }
+
+       config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+       /* TODO: compute core clock rate at runtime */
+       clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+
+       mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+       if (IS_ERR(mdp5_kms->smp)) {
+               ret = PTR_ERR(mdp5_kms->smp);
+               mdp5_kms->smp = NULL;
+               goto fail;
+       }
+
+       mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+       if (IS_ERR(mdp5_kms->ctlm)) {
+               ret = PTR_ERR(mdp5_kms->ctlm);
+               mdp5_kms->ctlm = NULL;
+               goto fail;
+       }
 
        /* make sure things are off before attaching iommu (bootloader could
         * have left things on, in which case we'll start getting faults if
         * we don't disable):
         */
        mdp5_enable(mdp5_kms);
-       for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+       for (i = 0; i < config->hw->intf.count; i++)
                mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
        mdp5_disable(mdp5_kms);
        mdelay(16);
 
-       if (config->iommu) {
-               mmu = msm_iommu_new(&pdev->dev, config->iommu);
+       if (config->platform.iommu) {
+               mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
                        dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
                mdp5_destroy(kms);
        return ERR_PTR(ret);
 }
-
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
-{
-       static struct mdp5_platform_config config = {};
-#ifdef CONFIG_OF
-       /* TODO */
-#endif
-       config.iommu = iommu_domain_alloc(&platform_bus_type);
-       /* TODO hard-coded in downstream mdss, but should it be? */
-       config.max_clk = 200000000;
-       /* TODO get from DT: */
-       config.smp_blk_cnt = 22;
-
-       return &config;
-}
index 5bf340dd0f00afc05300fa0e68346acb005e59c4..dd69c77c0d64f3b1e4928850555d93292c1dbf9d 100644 (file)
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "mdp/mdp_kms.h"
-/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
-#define MDP5_MAX_BASES         8
-struct mdp5_sub_block {
-       int     count;
-       uint32_t base[MDP5_MAX_BASES];
-};
-struct mdp5_config {
-       char  *name;
-       struct mdp5_sub_block ctl;
-       struct mdp5_sub_block pipe_vig;
-       struct mdp5_sub_block pipe_rgb;
-       struct mdp5_sub_block pipe_dma;
-       struct mdp5_sub_block lm;
-       struct mdp5_sub_block dspp;
-       struct mdp5_sub_block ad;
-       struct mdp5_sub_block intf;
-};
-extern const struct mdp5_config *mdp5_cfg;
+#include "mdp5_cfg.h"  /* must be included before mdp5.xml.h */
 #include "mdp5.xml.h"
+#include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
 struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
 
        struct drm_device *dev;
 
-       int rev;
-       const struct mdp5_config *hw_cfg;
+       struct mdp5_cfg_handler *cfg;
 
        /* mapper-id used to request GEM buffer mapped for scanout: */
        int id;
        struct msm_mmu *mmu;
 
-       /* for tracking smp allocation amongst pipes: */
-       mdp5_smp_state_t smp_state;
-       struct mdp5_client_smp_state smp_client_state[CID_MAX];
-       int smp_blk_cnt;
+       struct mdp5_smp *smp;
+       struct mdp5_ctl_manager *ctlm;
 
        /* io/register spaces: */
        void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
        struct clk *lut_clk;
        struct clk *vsync_clk;
 
-       struct hdmi *hdmi;
+       /*
+        * lock to protect access to global resources, i.e. the following register:
+        *      - REG_MDP5_DISP_INTF_SEL
+        */
+       spinlock_t resource_lock;
 
        struct mdp_irq error_handler;
+
+       struct {
+               volatile unsigned long enabled_mask;
+               struct irq_domain *domain;
+       } irqcontroller;
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
-/* platform config data (ie. from DT, or pdata) */
-struct mdp5_platform_config {
-       struct iommu_domain *iommu;
-       uint32_t max_clk;
-       int smp_blk_cnt;
+struct mdp5_plane_state {
+       struct drm_plane_state base;
+
+       /* "virtual" zpos.. we calculate actual mixer-stage at runtime
+        * by sorting the attached planes by zpos and then assigning
+        * mixer stage lowest to highest.  Private planes get default
+        * zpos of zero, and public planes a unique value that is
+        * greater than zero.  This way, things work out if a naive
+        * userspace assigns planes to a crtc without setting zpos.
+        */
+       int zpos;
+
+       /* the actual mixer stage, calculated in crtc->atomic_check()
+        * NOTE: this should move to mdp5_crtc_state, when that exists
+        */
+       enum mdp_mixer_stage_id stage;
+
+       /* some additional transactional status to help us know in the
+        * apply path whether we need to update SMP allocation, and
+        * whether the current update is still pending:
+        */
+       bool mode_changed : 1;
+       bool pending : 1;
 };
+#define to_mdp5_plane_state(x) \
+               container_of(x, struct mdp5_plane_state, base)
 
 static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
 {
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
        return names[pipe];
 }
 
-static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
-{
-       switch (pipe) {
-       case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
-       case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
-       case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
-       case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
-       case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
-       case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
-       case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
-       case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
-       case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
-       case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
-       default:        return 0;
-       }
-}
-
 static inline int pipe2nclients(enum mdp5_pipe pipe)
 {
        switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
        }
 }
 
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
-{
-       WARN_ON(plane >= pipe2nclients(pipe));
-       switch (pipe) {
-       case SSPP_VIG0: return CID_VIG0_Y + plane;
-       case SSPP_VIG1: return CID_VIG1_Y + plane;
-       case SSPP_VIG2: return CID_VIG2_Y + plane;
-       case SSPP_RGB0: return CID_RGB0;
-       case SSPP_RGB1: return CID_RGB1;
-       case SSPP_RGB2: return CID_RGB2;
-       case SSPP_DMA0: return CID_DMA0_Y + plane;
-       case SSPP_DMA1: return CID_DMA1_Y + plane;
-       case SSPP_VIG3: return CID_VIG3_Y + plane;
-       case SSPP_RGB3: return CID_RGB3;
-       default:        return CID_UNUSED;
-       }
-}
-
-static inline uint32_t mixer2flush(int lm)
-{
-       switch (lm) {
-       case 0:  return MDP5_CTL_FLUSH_LM0;
-       case 1:  return MDP5_CTL_FLUSH_LM1;
-       case 2:  return MDP5_CTL_FLUSH_LM2;
-       default: return 0;
-       }
-}
-
 static inline uint32_t intf2err(int intf)
 {
        switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
 irqreturn_t mdp5_irq(struct msm_kms *kms);
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 static inline
 uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
 
 void mdp5_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
-void mdp5_plane_set_scanout(struct drm_plane *plane,
-               struct drm_framebuffer *fb);
-int mdp5_plane_mode_set(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h);
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-               enum mdp5_pipe pipe, bool private_plane);
+               enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
+int mdp5_crtc_get_lm(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                enum mdp5_intf intf_id);
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id);
 
index f3daec4412ad3e9fd6b8f7303a2b27280df7442c..26e5fdea6594c7d23854bc566ffe618453c455dd 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,6 +18,7 @@
 
 #include "mdp5_kms.h"
 
+#define MAX_PLANE      4
 
 struct mdp5_plane {
        struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
 
        enum mdp5_pipe pipe;
 
+       spinlock_t pipe_lock;   /* protect REG_MDP5_PIPE_* registers */
+       uint32_t reg_offset;
+
+       uint32_t flush_mask;    /* used to commit pipe registers */
+
        uint32_t nformats;
        uint32_t formats[32];
 
@@ -31,31 +38,24 @@ struct mdp5_plane {
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h);
+static void set_scanout_locked(struct drm_plane *plane,
+               struct drm_framebuffer *fb);
+
 static struct mdp5_kms *get_kms(struct drm_plane *plane)
 {
        struct msm_drm_private *priv = plane->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-static int mdp5_plane_update(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
+static bool plane_enabled(struct drm_plane_state *state)
 {
-       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
-       mdp5_plane->enabled = true;
-
-       if (plane->fb)
-               drm_framebuffer_unreference(plane->fb);
-
-       drm_framebuffer_reference(fb);
-
-       return mdp5_plane_mode_set(plane, crtc, fb,
-                       crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x, src_y, src_w, src_h);
+       return state->fb && state->crtc;
 }
 
 static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
        enum mdp5_pipe pipe = mdp5_plane->pipe;
-       int i;
 
        DBG("%s: disable", mdp5_plane->name);
 
-       /* update our SMP request to zero (release all our blks): */
-       for (i = 0; i < pipe2nclients(pipe); i++)
-               mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
-
-       /* TODO detaching now will cause us not to get the last
-        * vblank and mdp5_smp_commit().. so other planes will
-        * still see smp blocks previously allocated to us as
-        * in-use..
-        */
-       if (plane->crtc)
-               mdp5_crtc_detach(plane->crtc, plane);
+       if (mdp5_kms) {
+               /* Release the memory we requested earlier from the SMP: */
+               mdp5_smp_release(mdp5_kms->smp, pipe);
+       }
 
        return 0;
 }
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-       struct msm_drm_private *priv = plane->dev->dev_private;
-
-       if (priv->kms)
-               mdp5_plane_disable(plane);
 
+       drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
 
        kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
        return -EINVAL;
 }
 
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+       struct mdp5_plane_state *mdp5_state;
+
+       if (plane->state && plane->state->fb)
+               drm_framebuffer_unreference(plane->state->fb);
+
+       kfree(to_mdp5_plane_state(plane->state));
+       mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+               mdp5_state->zpos = 0;
+       } else {
+               mdp5_state->zpos = 1 + drm_plane_index(plane);
+       }
+
+       plane->state = &mdp5_state->base;
+}
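
As a worked example of the defaults above (hypothetical plane indices):

/*
 * Example: primary RGB0 at drm plane index 0 and public planes
 * VIG0/VIG1 at indices 1/2 reset to zpos 0, 2, 3 respectively; the
 * sort in mdp5_crtc_atomic_check() then compresses those onto
 * contiguous stages STAGE_BASE, STAGE_BASE+1, STAGE_BASE+2.
 */
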
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+       struct mdp5_plane_state *mdp5_state;
+
+       if (WARN_ON(!plane->state))
+               return NULL;
+
+       mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+                       sizeof(*mdp5_state), GFP_KERNEL);
+
+       if (mdp5_state && mdp5_state->base.fb)
+               drm_framebuffer_reference(mdp5_state->base.fb);
+
+       mdp5_state->mode_changed = false;
+       mdp5_state->pending = false;
+
+       return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       if (state->fb)
+               drm_framebuffer_unreference(state->fb);
+
+       kfree(to_mdp5_plane_state(state));
+}
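
The duplicate/destroy pair keeps framebuffer refcounts balanced across a transaction; roughly, as the atomic core uses these hooks (a sketch, not code from this patch):

/* duplicate takes a ref on the current fb ... */
new_state = plane->funcs->atomic_duplicate_state(plane);

/* ... the core mutates and swaps it in, then frees the old copy,
 * which drops the ref taken when *that* copy was duplicated: */
plane->funcs->atomic_destroy_state(plane, old_state);
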
+
 static const struct drm_plane_funcs mdp5_plane_funcs = {
-               .update_plane = mdp5_plane_update,
-               .disable_plane = mdp5_plane_disable,
+               .update_plane = drm_atomic_helper_update_plane,
+               .disable_plane = drm_atomic_helper_disable_plane,
                .destroy = mdp5_plane_destroy,
                .set_property = mdp5_plane_set_property,
+               .reset = mdp5_plane_reset,
+               .atomic_duplicate_state = mdp5_plane_duplicate_state,
+               .atomic_destroy_state = mdp5_plane_destroy_state,
 };
 
-void mdp5_plane_set_scanout(struct drm_plane *plane,
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
                struct drm_framebuffer *fb)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
-       enum mdp5_pipe pipe = mdp5_plane->pipe;
-       uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
-       uint32_t iova[4];
-       int i;
-
-       for (i = 0; i < nplanes; i++) {
-               struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
-               msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
-       }
-       for (; i < 4; i++)
-               iova[i] = 0;
 
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
-                       MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
-                       MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
-                       MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
-                       MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
-
-       plane->fb = fb;
+       DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+       return msm_framebuffer_prepare(fb, mdp5_kms->id);
 }
 
-/* NOTE: looks like if horizontal decimation is used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width.  Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
-               uint32_t nplanes, uint32_t width)
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
 {
-       struct drm_device *dev = plane->dev;
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
-       enum mdp5_pipe pipe = mdp5_plane->pipe;
-       int i, hsub, nlines, nblks, ret;
 
-       hsub = drm_format_horz_chroma_subsampling(format);
+       DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+       msm_framebuffer_cleanup(fb, mdp5_kms->id);
+}
 
-       /* different if BWC (compressed framebuffer?) enabled: */
-       nlines = 2;
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct drm_plane_state *old_state = plane->state;
 
-       for (i = 0, nblks = 0; i < nplanes; i++) {
-               int n, fetch_stride, cpp;
+       DBG("%s: check (%d -> %d)", mdp5_plane->name,
+                       plane_enabled(old_state), plane_enabled(state));
 
-               cpp = drm_format_plane_cpp(format, i);
-               fetch_stride = width * cpp / (i ? hsub : 1);
+       if (plane_enabled(state) && plane_enabled(old_state)) {
+               /* we cannot change SMP block configuration during scanout: */
+               bool full_modeset = false;
+               if (state->fb->pixel_format != old_state->fb->pixel_format) {
+                       DBG("%s: pixel_format change!", mdp5_plane->name);
+                       full_modeset = true;
+               }
+               if (state->src_w != old_state->src_w) {
+                       DBG("%s: src_w change!", mdp5_plane->name);
+                       full_modeset = true;
+               }
+               if (to_mdp5_plane_state(old_state)->pending) {
+                       DBG("%s: still pending!", mdp5_plane->name);
+                       full_modeset = true;
+               }
+               if (full_modeset) {
+                       struct drm_crtc_state *crtc_state =
+                                       drm_atomic_get_crtc_state(state->state, state->crtc);
+                       if (IS_ERR(crtc_state))
+                               return PTR_ERR(crtc_state);
+                       crtc_state->mode_changed = true;
+                       to_mdp5_plane_state(state)->mode_changed = true;
+               }
+       } else {
+               to_mdp5_plane_state(state)->mode_changed = true;
+       }
 
-               n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+       return 0;
+}
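Concretely: flipping to a new framebuffer with the same pixel_format and src_w takes the cheap path (atomic_update below just rewrites the scanout registers under pipe_lock), while changing the format or source width marks both the plane state and the CRTC state as mode_changed, so the SMP reallocation happens across a full modeset rather than during active scanout.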
 
-               /* for hw rev v1.00 */
-               if (mdp5_kms->rev == 0)
-                       n = roundup_pow_of_two(n);
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct drm_plane_state *state = plane->state;
 
-               DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
-               ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
-               if (ret) {
-                       dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
-                                       n, ret);
-                       return ret;
-               }
+       DBG("%s: update", mdp5_plane->name);
 
-               nblks += n;
+       if (!plane_enabled(state)) {
+               to_mdp5_plane_state(state)->pending = true;
+               mdp5_plane_disable(plane);
+       } else if (to_mdp5_plane_state(state)->mode_changed) {
+               int ret;
+               to_mdp5_plane_state(state)->pending = true;
+               ret = mdp5_plane_mode_set(plane,
+                               state->crtc, state->fb,
+                               state->crtc_x, state->crtc_y,
+                               state->crtc_w, state->crtc_h,
+                               state->src_x,  state->src_y,
+                               state->src_w, state->src_h);
+               /* atomic_check should have ensured that this doesn't fail */
+               WARN_ON(ret < 0);
+       } else {
+               unsigned long flags;
+               spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+               set_scanout_locked(plane, state->fb);
+               spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
        }
-
-       /* in success case, return total # of blocks allocated: */
-       return nblks;
 }
 
-static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+               .prepare_fb = mdp5_plane_prepare_fb,
+               .cleanup_fb = mdp5_plane_cleanup_fb,
+               .atomic_check = mdp5_plane_atomic_check,
+               .atomic_update = mdp5_plane_atomic_update,
+};
+
+static void set_scanout_locked(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
        enum mdp5_pipe pipe = mdp5_plane->pipe;
-       uint32_t val;
 
-       /* 1/4 of SMP pool that is being fetched */
-       val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+                       MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+                       MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
 
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
-       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+                       MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+                       MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 3));
 
+       plane->fb = fb;
 }
 
-int mdp5_plane_mode_set(struct drm_plane *plane,
+static int mdp5_plane_mode_set(struct drm_plane *plane,
                struct drm_crtc *crtc, struct drm_framebuffer *fb,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
        uint32_t nplanes, config = 0;
        uint32_t phasex_step = 0, phasey_step = 0;
        uint32_t hdecm = 0, vdecm = 0;
-       int i, nblks;
+       unsigned long flags;
+       int ret;
 
        nplanes = drm_format_num_planes(fb->pixel_format);
 
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
                        fb->base.id, src_x, src_y, src_w, src_h,
                        crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
 
-       /*
-        * Calculate and request required # of smp blocks:
-        */
-       nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
-       if (nblks < 0)
-               return nblks;
+       /* Request some memory from the SMP: */
+       ret = mdp5_smp_request(mdp5_kms->smp,
+                       mdp5_plane->pipe, fb->pixel_format, src_w);
+       if (ret)
+               return ret;
 
        /*
         * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
         * would move into atomic->check_plane_state(), while updating the
         * hw would remain here:
         */
-       for (i = 0; i < pipe2nclients(pipe); i++)
-               mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+       mdp5_smp_configure(mdp5_kms->smp, pipe);
 
        if (src_w != crtc_w) {
                config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
                /* TODO calc phasey_step, vdecm */
        }
 
+       spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
                        MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
                        MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
                        MDP5_PIPE_OUT_XY_X(crtc_x) |
                        MDP5_PIPE_OUT_XY_Y(crtc_y));
 
-       mdp5_plane_set_scanout(plane, fb);
-
        format = to_mdp_format(msm_framebuffer_format(fb));
 
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
                        MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
                        MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
 
-       set_fifo_thresholds(plane, nblks);
+       set_scanout_locked(plane, fb);
 
-       /* TODO detach from old crtc (if we had more than one) */
-       mdp5_crtc_attach(crtc, plane);
+       spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
 
-       return 0;
+       return ret;
 }
 
 void mdp5_plane_complete_flip(struct drm_plane *plane)
 {
        struct mdp5_kms *mdp5_kms = get_kms(plane);
-       enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
-       int i;
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+       DBG("%s: complete flip", mdp5_plane->name);
 
-       for (i = 0; i < pipe2nclients(pipe); i++)
-               mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+       mdp5_smp_commit(mdp5_kms->smp, pipe);
+
+       to_mdp5_plane_state(plane->state)->pending = false;
 }
 
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
        return mdp5_plane->pipe;
 }
 
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+       return mdp5_plane->flush_mask;
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-               enum mdp5_pipe pipe, bool private_plane)
+               enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
 {
        struct drm_plane *plane = NULL;
        struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
        mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
                        ARRAY_SIZE(mdp5_plane->formats));
 
+       mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+       mdp5_plane->reg_offset = reg_offset;
+       spin_lock_init(&mdp5_plane->pipe_lock);
+
        type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-       drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+       ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
                                 mdp5_plane->formats, mdp5_plane->nformats,
                                 type);
+       if (ret)
+               goto fail;
+
+       drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
 
        mdp5_plane_install_properties(plane, &plane->base);
 
index 2d0236b963a6f76179c897241cee1ff7bcd034b1..bf551885e0197ed7421f32ab8976b66580e41dd5 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
  * Based on the size of the attached scanout buffer, a certain # of
  * blocks must be allocated to that client out of the shared pool.
  *
- * For each block, it can be either free, or pending/in-use by a
- * client.  The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
  *
  *  1) mdp5_smp_request():
  *     When plane scanout is setup, calculate required number of
  * inuse and pending state of all clients..
  */
 
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+       struct drm_device *dev;
+
+       int blk_cnt;
+       int blk_size;
+
+       spinlock_t state_lock;
+       mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+       struct mdp5_client_smp_state client_state[CID_MAX];
+};
 
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+       struct msm_drm_private *priv = smp->dev->dev_private;
+
+       return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+       WARN_ON(plane >= pipe2nclients(pipe));
+       switch (pipe) {
+       case SSPP_VIG0: return CID_VIG0_Y + plane;
+       case SSPP_VIG1: return CID_VIG1_Y + plane;
+       case SSPP_VIG2: return CID_VIG2_Y + plane;
+       case SSPP_RGB0: return CID_RGB0;
+       case SSPP_RGB1: return CID_RGB1;
+       case SSPP_RGB2: return CID_RGB2;
+       case SSPP_DMA0: return CID_DMA0_Y + plane;
+       case SSPP_DMA1: return CID_DMA1_Y + plane;
+       case SSPP_VIG3: return CID_VIG3_Y + plane;
+       case SSPP_RGB3: return CID_RGB3;
+       default:        return CID_UNUSED;
+       }
+}
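To illustrate the mapping (an inferred example, not code from this patch): a two-plane YUV buffer on a VIG pipe occupies one SMP client per component, while an RGB pipe always collapses to a single client:

    pipe2client(SSPP_VIG0, 0);   /* CID_VIG0_Y     (luma)        */
    pipe2client(SSPP_VIG0, 1);   /* CID_VIG0_Y + 1 (chroma)      */
    pipe2client(SSPP_RGB0, 0);   /* CID_RGB0, the only client    */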
 
 /* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
                enum mdp5_client_id cid, int nblks)
 {
-       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-       int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+       struct mdp5_kms *mdp5_kms = get_kms(smp);
+       const struct mdp5_cfg_hw *hw_cfg;
+       struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+       int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+       int reserved;
        unsigned long flags;
 
-       spin_lock_irqsave(&smp_lock, flags);
+       hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+       reserved = hw_cfg->smp.reserved[cid];
+
+       spin_lock_irqsave(&smp->state_lock, flags);
 
-       avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+       nblks = max(0, nblks - reserved);
+       if (reserved)
+               DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+
+       avail = cnt - bitmap_weight(smp->state, cnt);
        if (nblks > avail) {
+               dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+                               nblks, avail);
                ret = -ENOSPC;
                goto fail;
        }
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
        if (nblks > cur_nblks) {
                /* grow the existing pending reservation: */
                for (i = cur_nblks; i < nblks; i++) {
-                       int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+                       int blk = find_first_zero_bit(smp->state, cnt);
                        set_bit(blk, ps->pending);
-                       set_bit(blk, mdp5_kms->smp_state);
+                       set_bit(blk, smp->state);
                }
        } else {
                /* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
        }
 
+       ret = 0;
 fail:
-       spin_unlock_irqrestore(&smp_lock, flags);
+       spin_unlock_irqrestore(&smp->state_lock, flags);
+       return ret;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+               enum mdp5_pipe pipe, int nblks)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(smp);
+       u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+       u32 val;
+
+       /* 1/4 of SMP pool that is being fetched */
+       val = (nblks * smp_entries_per_blk) / 4;
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
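To put numbers on the watermark math (values assumed for illustration): with the common 4096-byte MMB and 128-bit fetch entries, smp_entries_per_blk = 4096 / 16 = 256. A pipe granted nblks = 4 then gets val = (4 * 256) / 4 = 256, so the three REQPRIO watermarks land at 256, 512 and 768 entries, i.e. 1/4, 2/4 and 3/4 of that pipe's share of the pool.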
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width.  Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(smp);
+       struct drm_device *dev = mdp5_kms->dev;
+       int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+       int i, hsub, nplanes, nlines, nblks, ret;
+
+       nplanes = drm_format_num_planes(fmt);
+       hsub = drm_format_horz_chroma_subsampling(fmt);
+
+       /* different if BWC (compressed framebuffer?) enabled: */
+       nlines = 2;
+
+       for (i = 0, nblks = 0; i < nplanes; i++) {
+               int n, fetch_stride, cpp;
+
+               cpp = drm_format_plane_cpp(fmt, i);
+               fetch_stride = width * cpp / (i ? hsub : 1);
+
+               n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+               /* for hw rev v1.00 */
+               if (rev == 0)
+                       n = roundup_pow_of_two(n);
+
+               DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+               ret = smp_request_block(smp, pipe2client(pipe, i), n);
+               if (ret) {
+                       dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+                                       n, ret);
+                       return ret;
+               }
+
+               nblks += n;
+       }
+
+       set_fifo_thresholds(smp, pipe, nblks);
+
        return 0;
 }
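As a worked example of the sizing above (figures assumed, not from any particular board): a 1920-wide XRGB8888 scanout has one plane with cpp = 4, so fetch_stride = 1920 * 4 = 7680 bytes; with nlines = 2 and blk_size = 4096 that gives n = DIV_ROUND_UP(15360, 4096) = 4 MMBs, which roundup_pow_of_two() leaves at 4 on a v1.00 part.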
 
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+{
+       int i;
+
+       for (i = 0; i < pipe2nclients(pipe); i++)
+               smp_request_block(smp, pipe2client(pipe, i), 0);
+       set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
                enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
 {
-       int cnt = mdp5_kms->smp_blk_cnt;
-       uint32_t blk, val;
+       struct mdp5_kms *mdp5_kms = get_kms(smp);
+       int cnt = smp->blk_cnt;
+       u32 blk, val;
 
        for_each_set_bit(blk, *assigned, cnt) {
                int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
 }
 
 /* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-       int cnt = mdp5_kms->smp_blk_cnt;
+       int cnt = smp->blk_cnt;
        mdp5_smp_state_t assigned;
+       int i;
 
-       bitmap_or(assigned, ps->inuse, ps->pending, cnt);
-       update_smp_state(mdp5_kms, cid, &assigned);
+       for (i = 0; i < pipe2nclients(pipe); i++) {
+               enum mdp5_client_id cid = pipe2client(pipe, i);
+               struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+               bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+               update_smp_state(smp, cid, &assigned);
+       }
 }
 
 /* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-       int cnt = mdp5_kms->smp_blk_cnt;
+       int cnt = smp->blk_cnt;
        mdp5_smp_state_t released;
+       int i;
+
+       for (i = 0; i < pipe2nclients(pipe); i++) {
+               enum mdp5_client_id cid = pipe2client(pipe, i);
+               struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+               /*
+                * Figure out if there are any blocks we were previously
+                * using, which can be released and made available to other
+                * clients:
+                */
+               if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&smp->state_lock, flags);
+                       /* clear released blocks: */
+                       bitmap_andnot(smp->state, smp->state, released, cnt);
+                       spin_unlock_irqrestore(&smp->state_lock, flags);
 
-       /*
-        * Figure out if there are any blocks we where previously
-        * using, which can be released and made available to other
-        * clients:
-        */
-       if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&smp_lock, flags);
-               /* clear released blocks: */
-               bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
-                               released, cnt);
-               spin_unlock_irqrestore(&smp_lock, flags);
-
-               update_smp_state(mdp5_kms, CID_UNUSED, &released);
+                       update_smp_state(smp, CID_UNUSED, &released);
+               }
+
+               bitmap_copy(ps->inuse, ps->pending, cnt);
        }
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+       kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+       struct mdp5_smp *smp = NULL;
+       int ret;
+
+       smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+       if (unlikely(!smp)) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       smp->dev = dev;
+       smp->blk_cnt = cfg->mmb_count;
+       smp->blk_size = cfg->mmb_size;
+
+       /* statically tied MMBs cannot be re-allocated: */
+       bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+       spin_lock_init(&smp->state_lock);
+
+       return smp;
+fail:
+       if (smp)
+               mdp5_smp_destroy(smp);
 
-       bitmap_copy(ps->inuse, ps->pending, cnt);
+       return ERR_PTR(ret);
 }
index 0ab739e1a1dd59ccaf0e4bde59dc0a790a469616..e47179f635852a2f6ee546378200ce0a54a64499 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
 
 #include "msm_drv.h"
 
-#define MAX_SMP_BLOCKS  22
-#define SMP_BLK_SIZE    4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
-
 struct mdp5_client_smp_state {
        mdp5_smp_state_t inuse;
        mdp5_smp_state_t pending;
 };
 
 struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP handle (struct mdp5_smp),
+ * which is then passed to the other mdp5_smp_*(smp, ...) functions.
+ */
 
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void  mdp5_smp_destroy(struct mdp5_smp *smp);
 
+int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 
 #endif /* __MDP5_SMP_H__ */
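Taken together with the mdp5_plane hunks earlier in this diff, the intended per-pipe calling sequence is roughly (a sketch, not verbatim driver code):

    mdp5_smp_request(smp, pipe, fb->pixel_format, src_w);  /* at mode_set */
    mdp5_smp_configure(smp, pipe);   /* program union(pending, inuse)     */
    /* ... flush; then, from the vblank/flush-complete path: */
    mdp5_smp_commit(smp, pipe);      /* pending -> inuse, release the rest */
    /* ... and when the plane is disabled: */
    mdp5_smp_release(smp, pipe);     /* drop all blocks held for the pipe */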
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644 (file)
index 0000000..f0de412
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+
+struct msm_commit {
+       struct drm_atomic_state *state;
+       uint32_t fence;
+       struct msm_fence_cb fence_cb;
+};
+
+static void fence_cb(struct msm_fence_cb *cb);
+
+static struct msm_commit *new_commit(struct drm_atomic_state *state)
+{
+       struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+       if (!c)
+               return NULL;
+
+       c->state = state;
+       /* TODO we might need a way to indicate to run the cb on a
+        * different wq so wait_for_vblanks() doesn't block retiring
+        * bo's..
+        */
+       INIT_FENCE_CB(&c->fence_cb, fence_cb);
+
+       return c;
+}
+
+/* The (potentially) asynchronous part of the commit.  At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+       struct drm_atomic_state *state = c->state;
+       struct drm_device *dev = state->dev;
+
+       drm_atomic_helper_commit_pre_planes(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state);
+
+       drm_atomic_helper_commit_post_planes(dev, state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+
+       drm_atomic_state_free(state);
+
+       kfree(c);
+}
+
+static void fence_cb(struct msm_fence_cb *cb)
+{
+       struct msm_commit *c =
+                       container_of(cb, struct msm_commit, fence_cb);
+       complete_commit(c);
+}
+
+static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+{
+       struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+       c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+
+/**
+ * msm_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. When @async is true, the commit completes from a fence
+ * callback instead of blocking.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int msm_atomic_commit(struct drm_device *dev,
+               struct drm_atomic_state *state, bool async)
+{
+       struct msm_commit *c;
+       int nplanes = dev->mode_config.num_total_plane;
+       int i, ret;
+
+       ret = drm_atomic_helper_prepare_planes(dev, state);
+       if (ret)
+               return ret;
+
+       c = new_commit(state);
+       if (!c) {
+               drm_atomic_helper_cleanup_planes(dev, state);
+               return -ENOMEM;
+       }
+
+       /*
+        * Figure out what fence to wait for:
+        */
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *new_state = state->plane_states[i];
+
+               if (!plane)
+                       continue;
+
+               if ((plane->state->fb != new_state->fb) && new_state->fb)
+                       add_fb(c, new_state->fb);
+       }
+
+       /*
+        * This is the point of no return - everything below never fails except
+        * when the hw goes bonghits. Which means we can commit the new state on
+        * the software side now.
+        */
+
+       drm_atomic_helper_swap_state(dev, state);
+
+       /*
+        * Everything below can be run asynchronously without the need to grab
+        * any modeset locks at all under one condition: It must be guaranteed
+        * that the asynchronous work has either been cancelled (if the driver
+        * supports it, which at least requires that the framebuffers get
+        * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+        * before the new state gets committed on the software side with
+        * drm_atomic_helper_swap_state().
+        *
+        * This scheme allows new atomic state updates to be prepared and
+        * checked in parallel to the asynchronous completion of the previous
+        * update. Which is important since compositors need to figure out the
+        * composition of the next frame right after having submitted the
+        * current layout.
+        */
+
+       if (async) {
+               msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+               return 0;
+       }
+
+       ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
+       if (ret) {
+               WARN_ON(ret);  /* TODO unswap state back? or?? */
+               kfree(c);
+               return ret;
+       }
+
+       complete_commit(c);
+
+       return 0;
+}
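For orientation, an asynchronous commit travels through this file roughly as follows (a sketch of the control flow implemented above):

    msm_atomic_commit(dev, state, true)
            drm_atomic_helper_prepare_planes()      /* pin fbs                */
            add_fb() per updated plane              /* pick fence to wait on  */
            drm_atomic_helper_swap_state()          /* point of no return     */
            msm_queue_fence_cb(dev, &c->fence_cb, c->fence)
    /* later, once the GPU retires c->fence: */
    fence_cb() -> complete_commit()
            commit pre/planes/post, wait for vblanks,
            cleanup planes, free the state and the msm_commit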
index b67ef59851250f6c442cf1dedfa0a88db5230ce9..d3b791b7ddef014e35b76ec43db3be1f8f576cf2 100644 (file)
@@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
 static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = msm_fb_output_poll_changed,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = msm_atomic_commit,
 };
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
                goto fail;
        }
 
+       drm_mode_config_reset(dev);
+
 #ifdef CONFIG_DRM_MSM_FBDEV
        priv->fbdev = msm_fbdev_init(dev);
 #endif
@@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
        return ret;
 }
 
+int msm_queue_fence_cb(struct drm_device *dev,
+               struct msm_fence_cb *cb, uint32_t fence)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!list_empty(&cb->work.entry)) {
+               ret = -EINVAL;
+       } else if (fence > priv->completed_fence) {
+               cb->fence = fence;
+               list_add_tail(&cb->work.entry, &priv->fence_cbs);
+       } else {
+               queue_work(priv->wq, &cb->work);
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
 /* called from workqueue */
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {
@@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
        .gem_prime_vmap     = msm_gem_prime_vmap,
        .gem_prime_vunmap   = msm_gem_prime_vunmap,
+       .gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
        .debugfs_cleanup    = msm_debugfs_cleanup,
index 67f9d0a2332c28b9983d7fe29000f907ffcd87f5..136303818436726004b2f294c6c90d69ccdd2f05 100644 (file)
 #include <linux/types.h>
 #include <asm/sizes.h>
 
-
-#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
-/* stubs we need for compile-test: */
-static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
-       return NULL;
-}
-#endif
-
 #ifndef CONFIG_OF
 #include <mach/board.h>
 #include <mach/socinfo.h>
@@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 #endif
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
@@ -75,7 +69,12 @@ struct msm_drm_private {
        struct msm_kms *kms;
 
        /* subordinate devices, if present: */
-       struct platform_device *hdmi_pdev, *gpu_pdev;
+       struct platform_device *gpu_pdev;
+
+       /* possibly this should be in the kms component, but it is
+        * shared by both mdp4 and mdp5..
+        */
+       struct hdmi *hdmi;
 
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
@@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
                (_cb)->func = _func;                         \
        } while (0)
 
+int msm_atomic_commit(struct drm_device *dev,
+               struct drm_atomic_state *state, bool async);
+
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
 int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+               struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+                       struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova);
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                uint32_t size, struct sg_table *sgt);
 
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -202,8 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 
 struct hdmi;
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
-irqreturn_t hdmi_irq(int irq, void *dev_id);
+int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+               struct drm_encoder *encoder);
 void __init hdmi_register(void);
 void __exit hdmi_unregister(void);
 
index 81bafdf19ab39fc32428e6f5be692c597a6a8276..84dec161d8360522081826e049a7cbd7b7995101 100644 (file)
@@ -24,7 +24,7 @@
 struct msm_framebuffer {
        struct drm_framebuffer base;
        const struct msm_format *format;
-       struct drm_gem_object *planes[2];
+       struct drm_gem_object *planes[3];
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
@@ -87,6 +87,44 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 }
 #endif
 
+/* prepare/pin all the fb's bo's for scanout.  Note that it is not valid
+ * to prepare an fb for multiple different initiator 'id's.  But that
+ * should be fine, since only the scanout (mdpN) side of things needs
+ * this; the gpu doesn't care about fb's.
+ */
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+{
+       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+       int ret, i, n = drm_format_num_planes(fb->pixel_format);
+       uint32_t iova;
+
+       for (i = 0; i < n; i++) {
+               ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+               DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+{
+       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+       int i, n = drm_format_num_planes(fb->pixel_format);
+
+       for (i = 0; i < n; i++)
+               msm_gem_put_iova(msm_fb->planes[i], id);
+}
+
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+{
+       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+       /* unused planes (e.g. SRC3 for sub-4-plane formats) read back as 0: */
+       if (plane >= ARRAY_SIZE(msm_fb->planes) || !msm_fb->planes[plane])
+               return 0;
+       return msm_gem_iova(msm_fb->planes[plane], id);
+}
+
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@@ -166,6 +204,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
        msm_fb->format = format;
 
+       if (n > ARRAY_SIZE(msm_fb->planes)) {
+               ret = -EINVAL;
+               goto fail;
+       }
+
        for (i = 0; i < n; i++) {
                unsigned int width = mode_cmd->width / (i ? hsub : 1);
                unsigned int height = mode_cmd->height / (i ? vsub : 1);
index ab5bfd2d0ebf2e29897fa56e1df2c77c0f47aeb8..94d55e526b4e0732cba7fcf45a2ce725c2433637 100644 (file)
@@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        uint32_t paddr;
        int ret, size;
 
-       sizes->surface_bpp = 32;
-       sizes->surface_depth = 24;
-
        DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
                        sizes->surface_height, sizes->surface_bpp,
                        sizes->fb_width, sizes->fb_height);
index 4b1b82adabdeaa504cd63ca290ff37f6acc223de..4a6f0e49d5b5f0709d586e3dcf4e271a0c99c0f5 100644 (file)
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
        return ret;
 }
 
+/* get iova, taking a reference.  Should have a matching put */
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
        return ret;
 }
 
+/* get iova without taking a reference, used in places where you have
+ * already done a 'msm_gem_get_iova()'.
+ */
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(!msm_obj->domain[id].iova);
+       return msm_obj->domain[id].iova;
+}
+
 void msm_gem_put_iova(struct drm_gem_object *obj, int id)
 {
        // XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
                struct msm_fence_cb *cb)
 {
-       struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       int ret = 0;
-
-       mutex_lock(&dev->struct_mutex);
-       if (!list_empty(&cb->work.entry)) {
-               ret = -EINVAL;
-       } else if (is_active(msm_obj)) {
-               cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
-               list_add_tail(&cb->work.entry, &priv->fence_cbs);
-       } else {
-               queue_work(priv->wq, &cb->work);
-       }
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
+       uint32_t fence = msm_gem_fence(msm_obj,
+                       MSM_PREP_READ | MSM_PREP_WRITE);
+       return msm_queue_fence_cb(obj->dev, cb, fence);
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
        int ret = 0;
 
        if (is_active(msm_obj)) {
-               uint32_t fence = 0;
+               uint32_t fence = msm_gem_fence(msm_obj, op);
 
-               if (op & MSM_PREP_READ)
-                       fence = msm_obj->write_fence;
-               if (op & MSM_PREP_WRITE)
-                       fence = max(fence, msm_obj->read_fence);
                if (op & MSM_PREP_NOSYNC)
                        timeout = NULL;
 
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                struct msm_mmu *mmu = priv->mmus[id];
                if (mmu && msm_obj->domain[id].iova) {
-                       uint32_t offset = (uint32_t)mmap_offset(obj);
+                       uint32_t offset = msm_obj->domain[id].iova;
                        mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
                }
        }
 
-       drm_gem_free_mmap_offset(obj);
-
        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
index bfb052688f8ea01a7852bb748cc508c40d1311d3..8fbbd0594c46604e107d19a3fbbe59a735d2375d 100644 (file)
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
        return msm_obj->gpu != NULL;
 }
 
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+               uint32_t op)
+{
+       uint32_t fence = 0;
+
+       if (op & MSM_PREP_READ)
+               fence = msm_obj->write_fence;
+       if (op & MSM_PREP_WRITE)
+               fence = max(fence, msm_obj->read_fence);
+
+       return fence;
+}
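For instance, the scanout path only needs to wait on the last writer before reading a buffer, which is exactly what add_fb() in msm_atomic.c above does:

    /* fence that must retire before the bo may be scanned out: */
    uint32_t fence = msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ);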
+
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
index ad772fe36115f69c47aaca1f1caa2c2356a8bd01..dd7a7ab603e2c202ea297575fab0aba003234f5b 100644 (file)
@@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
        /* TODO msm_gem_vunmap() */
 }
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+       int ret;
+
+       mutex_lock(&obj->dev->struct_mutex);
+       ret = drm_gem_mmap_obj(obj, obj->size, vma);
+       mutex_unlock(&obj->dev->struct_mutex);
+       if (ret < 0)
+               return ret;
+
+       return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg)
 {
index 12c24c8abf7f54ab8c94af874b6220a931053d45..6461e3565afe2b1f5c76fbbeca06e594fad0b657 100644 (file)
@@ -41,17 +41,28 @@ nouveau-y += core/subdev/bios/extdev.o
 nouveau-y += core/subdev/bios/fan.o
 nouveau-y += core/subdev/bios/gpio.o
 nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/image.o
 nouveau-y += core/subdev/bios/init.o
 nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/npde.o
+nouveau-y += core/subdev/bios/pcir.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/pmu.o
 nouveau-y += core/subdev/bios/ramcfg.o
 nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/shadow.o
+nouveau-y += core/subdev/bios/shadowacpi.o
+nouveau-y += core/subdev/bios/shadowof.o
+nouveau-y += core/subdev/bios/shadowpci.o
+nouveau-y += core/subdev/bios/shadowramin.o
+nouveau-y += core/subdev/bios/shadowrom.o
 nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
 nouveau-y += core/subdev/bios/vmap.o
 nouveau-y += core/subdev/bios/volt.o
 nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/M0203.o
 nouveau-y += core/subdev/bios/M0205.o
 nouveau-y += core/subdev/bios/M0209.o
 nouveau-y += core/subdev/bios/P0260.o
@@ -86,6 +97,7 @@ nouveau-y += core/subdev/devinit/nva3.o
 nouveau-y += core/subdev/devinit/nvaf.o
 nouveau-y += core/subdev/devinit/nvc0.o
 nouveau-y += core/subdev/devinit/gm107.o
+nouveau-y += core/subdev/devinit/gm204.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
 nouveau-y += core/subdev/fb/nv10.o
@@ -129,6 +141,7 @@ nouveau-y += core/subdev/fb/ramgk20a.o
 nouveau-y += core/subdev/fb/ramgm107.o
 nouveau-y += core/subdev/fb/sddr2.o
 nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr3.o
 nouveau-y += core/subdev/fb/gddr5.o
 nouveau-y += core/subdev/fuse/base.o
 nouveau-y += core/subdev/fuse/g80.o
@@ -147,6 +160,7 @@ nouveau-y += core/subdev/i2c/bit.o
 nouveau-y += core/subdev/i2c/pad.o
 nouveau-y += core/subdev/i2c/padnv04.o
 nouveau-y += core/subdev/i2c/padnv94.o
+nouveau-y += core/subdev/i2c/padgm204.o
 nouveau-y += core/subdev/i2c/nv04.o
 nouveau-y += core/subdev/i2c/nv4e.o
 nouveau-y += core/subdev/i2c/nv50.o
@@ -154,6 +168,7 @@ nouveau-y += core/subdev/i2c/nv94.o
 nouveau-y += core/subdev/i2c/nvd0.o
 nouveau-y += core/subdev/i2c/gf117.o
 nouveau-y += core/subdev/i2c/nve0.o
+nouveau-y += core/subdev/i2c/gm204.o
 nouveau-y += core/subdev/ibus/nvc0.o
 nouveau-y += core/subdev/ibus/nve0.o
 nouveau-y += core/subdev/ibus/gk20a.o
@@ -211,6 +226,7 @@ nouveau-y += core/subdev/vm/nvc0.o
 nouveau-y += core/subdev/volt/base.o
 nouveau-y += core/subdev/volt/gpio.o
 nouveau-y += core/subdev/volt/nv40.o
+nouveau-y += core/subdev/volt/gk20a.o
 
 nouveau-y += core/engine/falcon.o
 nouveau-y += core/engine/xtensa.o
@@ -254,6 +270,7 @@ nouveau-y += core/engine/disp/nvd0.o
 nouveau-y += core/engine/disp/nve0.o
 nouveau-y += core/engine/disp/nvf0.o
 nouveau-y += core/engine/disp/gm107.o
+nouveau-y += core/engine/disp/gm204.o
 nouveau-y += core/engine/disp/dacnv50.o
 nouveau-y += core/engine/disp/dport.o
 nouveau-y += core/engine/disp/hdanva3.o
@@ -266,6 +283,7 @@ nouveau-y += core/engine/disp/piornv50.o
 nouveau-y += core/engine/disp/sornv50.o
 nouveau-y += core/engine/disp/sornv94.o
 nouveau-y += core/engine/disp/sornvd0.o
+nouveau-y += core/engine/disp/sorgm204.o
 nouveau-y += core/engine/disp/vga.o
 nouveau-y += core/engine/fifo/base.o
 nouveau-y += core/engine/fifo/nv04.o
index a490b805d7e321e0db2715ed979388e657b9f0e5..13f816cb08bd6b69039e74bc9eab32ab9df567c9 100644 (file)
@@ -222,116 +222,3 @@ nouveau_handle_put(struct nouveau_handle *handle)
        if (handle)
                nouveau_namedb_put(handle);
 }
-
-int
-nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
-                  u16 _oclass, void *data, u32 size,
-                  struct nouveau_object **pobject)
-{
-       struct nouveau_object *parent = NULL;
-       struct nouveau_object *engctx = NULL;
-       struct nouveau_object *object = NULL;
-       struct nouveau_object *engine;
-       struct nouveau_oclass *oclass;
-       struct nouveau_handle *handle;
-       int ret;
-
-       /* lookup parent object and ensure it *is* a parent */
-       parent = nouveau_handle_ref(client, _parent);
-       if (!parent) {
-               nv_error(client, "parent 0x%08x not found\n", _parent);
-               return -ENOENT;
-       }
-
-       if (!nv_iclass(parent, NV_PARENT_CLASS)) {
-               nv_error(parent, "cannot have children\n");
-               ret = -EINVAL;
-               goto fail_class;
-       }
-
-       /* check that parent supports the requested subclass */
-       ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
-       if (ret) {
-               nv_debug(parent, "illegal class 0x%04x\n", _oclass);
-               goto fail_class;
-       }
-
-       /* make sure engine init has been completed *before* any objects
-        * it controls are created - the constructors may depend on
-        * state calculated at init (ie. default context construction)
-        */
-       if (engine) {
-               ret = nouveau_object_inc(engine);
-               if (ret)
-                       goto fail_class;
-       }
-
-       /* if engine requires it, create a context object to insert
-        * between the parent and its children (eg. PGRAPH context)
-        */
-       if (engine && nv_engine(engine)->cclass) {
-               ret = nouveau_object_ctor(parent, engine,
-                                         nv_engine(engine)->cclass,
-                                         data, size, &engctx);
-               if (ret)
-                       goto fail_engctx;
-       } else {
-               nouveau_object_ref(parent, &engctx);
-       }
-
-       /* finally, create new object and bind it to its handle */
-       ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
-       *pobject = object;
-       if (ret)
-               goto fail_ctor;
-
-       ret = nouveau_object_inc(object);
-       if (ret)
-               goto fail_init;
-
-       ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
-       if (ret)
-               goto fail_handle;
-
-       ret = nouveau_handle_init(handle);
-       if (ret)
-               nouveau_handle_destroy(handle);
-
-fail_handle:
-       nouveau_object_dec(object, false);
-fail_init:
-       nouveau_object_ref(NULL, &object);
-fail_ctor:
-       nouveau_object_ref(NULL, &engctx);
-fail_engctx:
-       if (engine)
-               nouveau_object_dec(engine, false);
-fail_class:
-       nouveau_object_ref(NULL, &parent);
-       return ret;
-}
-
-int
-nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
-       struct nouveau_object *parent = NULL;
-       struct nouveau_object *namedb = NULL;
-       struct nouveau_handle *handle = NULL;
-
-       parent = nouveau_handle_ref(client, _parent);
-       if (!parent)
-               return -ENOENT;
-
-       namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
-       if (namedb) {
-               handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
-               if (handle) {
-                       nouveau_namedb_put(handle);
-                       nouveau_handle_fini(handle, false);
-                       nouveau_handle_destroy(handle);
-               }
-       }
-
-       nouveau_object_ref(NULL, &parent);
-       return handle ? 0 : -EINVAL;
-}
index 0ef5a5713182bc5e8d05a45ad8670672286d945b..137e0b0faeae0d1b76ad9e5359c2eef9b66e0642 100644 (file)
@@ -29,6 +29,7 @@
 #include <nvif/unpack.h>
 #include <nvif/class.h>
 
+#include <subdev/bios.h>
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 
@@ -138,7 +139,7 @@ nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
        }
 
        args->v0.chipset  = device->chipset;
-       args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+       args->v0.revision = device->chiprev;
        if (pfb)  args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
        else      args->v0.ram_size = args->v0.ram_user = 0;
        if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
@@ -222,6 +223,7 @@ static const u64 disable_map[] = {
        [NVDEV_SUBDEV_VOLT]     = NV_DEVICE_V0_DISABLE_CORE,
        [NVDEV_SUBDEV_THERM]    = NV_DEVICE_V0_DISABLE_CORE,
        [NVDEV_SUBDEV_PWR]      = NV_DEVICE_V0_DISABLE_CORE,
+       [NVDEV_SUBDEV_FUSE]     = NV_DEVICE_V0_DISABLE_CORE,
        [NVDEV_ENGINE_DMAOBJ]   = NV_DEVICE_V0_DISABLE_CORE,
        [NVDEV_ENGINE_PERFMON]  = NV_DEVICE_V0_DISABLE_CORE,
        [NVDEV_ENGINE_FIFO]     = NV_DEVICE_V0_DISABLE_FIFO,
@@ -235,6 +237,7 @@ static const u64 disable_map[] = {
        [NVDEV_ENGINE_PPP]      = NV_DEVICE_V0_DISABLE_PPP,
        [NVDEV_ENGINE_COPY0]    = NV_DEVICE_V0_DISABLE_COPY0,
        [NVDEV_ENGINE_COPY1]    = NV_DEVICE_V0_DISABLE_COPY1,
+       [NVDEV_ENGINE_COPY2]    = NV_DEVICE_V0_DISABLE_COPY1,
        [NVDEV_ENGINE_VIC]      = NV_DEVICE_V0_DISABLE_VIC,
        [NVDEV_ENGINE_VENC]     = NV_DEVICE_V0_DISABLE_VENC,
        [NVDEV_ENGINE_DISP]     = NV_DEVICE_V0_DISABLE_DISP,
@@ -352,12 +355,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                /* determine chipset and derive architecture from it */
                if ((boot0 & 0x1f000000) > 0) {
                        device->chipset = (boot0 & 0x1ff00000) >> 20;
+                       device->chiprev = (boot0 & 0x000000ff);
                        switch (device->chipset & 0x1f0) {
                        case 0x010: {
                                if (0x461 & (1 << (device->chipset & 0xf)))
                                        device->card_type = NV_10;
                                else
                                        device->card_type = NV_11;
+                               device->chiprev = 0x00;
                                break;
                        }
                        case 0x020: device->card_type = NV_20; break;
@@ -373,7 +378,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                        case 0x0e0:
                        case 0x0f0:
                        case 0x100: device->card_type = NV_E0; break;
-                       case 0x110: device->card_type = GM100; break;
+                       case 0x110:
+                       case 0x120: device->card_type = GM100; break;
                        default:
                                break;
                        }
@@ -427,6 +433,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                }
 
                nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+       } else
+       if (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) {
+               device->cname = "NULL";
+               device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
        }
 
        if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
index 6295668e29a5897a45e08d2ce09efac9c5aac721..4e74a3376de88d9a695eed859fd17ad136a28c91 100644 (file)
@@ -96,6 +96,49 @@ gm100_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+#endif
+               break;
+       case 0x124:
+               device->cname = "GM204";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
+#if 0
+               /* looks to be some non-trivial changes */
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+               /* priv ring says no to 0x10eb14 writes */
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
+#endif
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_SUBDEV_PWR    ] =  nv108_pwr_oclass;
+#if 0
+               device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+#endif
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nvd0_dmaeng_oclass;
+#if 0
+               device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] =  gm107_graph_oclass;
+#endif
+               device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
+#if 0
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &gm204_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &gm204_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_COPY2  ] = &gm204_copy2_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 #endif
                break;
        default:
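The GM204 entry above follows the driver's staged bring-up pattern: only the parts known to work are wired into the oclass table, and everything still under #if 0 stays NULL so object creation skips it. A simplified sketch of that consumption side, with made-up slot names:

#include <stdio.h>

#define SUBDEV_NR 4

int main(void)
{
	const char *oclass[SUBDEV_NR] = { "vbios", "fuse", NULL /* gr, still #if 0 */, "disp" };
	int i;

	for (i = 0; i < SUBDEV_NR; i++)
		printf("slot %d: %s\n", i,
		       oclass[i] ? oclass[i] : "not wired up yet");
	return 0;
}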
index cd05677ad4b7a128a087f4b611f73b5ab4978006..72a40f95d048e06c9bc625ed2e0575edbce6e933 100644 (file)
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] =  nva3_disp_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
                break;
index b1b2e484ecfabb1a51426a9737fa3d05a014c30c..674da1f095b29a1c1ecc524fef40eb3b60bc3a35 100644 (file)
@@ -179,6 +179,7 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_GR     ] =  gk20a_graph_oclass;
                device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+               device->oclass[NVDEV_SUBDEV_VOLT   ] = &gk20a_volt_oclass;
                break;
        case 0xf0:
                device->cname = "GK110";
index 39890221b91cbba988a6ab33534b757fce2d8718..16db08dfba6ef38645e813481e6d4201d94b695e 100644 (file)
@@ -28,7 +28,7 @@
 #include <subdev/bios/init.h>
 #include <subdev/i2c.h>
 
-#include <engine/disp.h>
+#include "nv50.h"
 
 #include <nvif/class.h>
 
@@ -326,7 +326,7 @@ void
 nouveau_dp_train(struct work_struct *w)
 {
        struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
-       struct nouveau_disp *disp = nouveau_disp(outp);
+       struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
        const struct dp_rates *cfg = nouveau_dp_rates;
        struct dp_state _dp = {
                .outp = outp,
@@ -334,8 +334,11 @@ nouveau_dp_train(struct work_struct *w)
        u32 datarate = 0;
        int ret;
 
+       if (!outp->base.info.location && priv->sor.magic)
+               priv->sor.magic(&outp->base);
+
        /* bring capabilities within encoder limits */
-       if (nv_mclass(disp) < GF110_DISP)
+       if (nv_mclass(priv) < GF110_DISP)
                outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
        if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
                outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
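A sketch of the capability clamp shown in this hunk, with made-up values: the sink's advertised maximum lane count (low five bits of DPCD register 0x002) is reduced to what the encoder's link configuration can actually drive.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dpcd2   = 0x84;  /* hypothetical: TPS3 supported, 4 lanes */
	uint8_t link_nr = 2;     /* encoder wired for 2 lanes */

	if ((dpcd2 & 0x1f) > link_nr) {
		dpcd2 &= ~0x1f;  /* DPCD_RC02_MAX_LANE_COUNT */
		dpcd2 |= link_nr;
	}
	printf("lane count after clamp: %d\n", dpcd2 & 0x1f);
	return 0;
}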
index b3df3fe2dc094b6dbebdc861c94450ef48ddf3e2..e2ad0543fb31e252d5662da41da258c4845b543e 100644 (file)
@@ -35,8 +35,8 @@
 
 static struct nouveau_oclass
 gm107_disp_sclass[] = {
-       { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-       { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+       { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+       { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
        { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
        { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
        { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ gm107_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-gm107_disp_base_oclass[] = {
-       { GM107_DISP, &nvd0_disp_base_ofuncs },
+gm107_disp_main_oclass[] = {
+       { GM107_DISP, &nvd0_disp_main_ofuncs },
        {}
 };
 
@@ -72,7 +72,7 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = gm107_disp_base_oclass;
+       nv_engine(priv)->sclass = gm107_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
        INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nvd0_disp_vblank_func,
        .base.outp =  nvd0_disp_outp_sclass,
-       .mthd.core = &nve0_disp_mast_mthd_chan,
-       .mthd.base = &nvd0_disp_sync_mthd_chan,
+       .mthd.core = &nve0_disp_core_mthd_chan,
+       .mthd.base = &nvd0_disp_base_mthd_chan,
        .mthd.ovly = &nve0_disp_ovly_mthd_chan,
        .mthd.prev = -0x020000,
-       .head.scanoutpos = nvd0_disp_base_scanoutpos,
+       .head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
new file mode 100644 (file)
index 0000000..672ded7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <nvif/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm204_disp_sclass[] = {
+       { GM204_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+       { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
+       { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+       { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+       { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+       {}
+};
+
+static struct nouveau_oclass
+gm204_disp_main_oclass[] = {
+       { GM204_DISP, &nvd0_disp_main_ofuncs },
+       {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int heads = nv_rd32(parent, 0x022448);
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, heads,
+                                 "PDISP", "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = gm204_disp_main_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nvd0_disp_intr;
+       INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+       priv->sclass = gm204_disp_sclass;
+       priv->head.nr = heads;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.magic = gm204_sor_magic;
+       return 0;
+}
+
+struct nouveau_oclass *
+gm204_disp_outp_sclass[] = {
+       &gm204_sor_dp_impl.base.base,
+       NULL
+};
+
+struct nouveau_oclass *
+gm204_disp_oclass = &(struct nv50_disp_impl) {
+       .base.base.handle = NV_ENGINE(DISP, 0x07),
+       .base.base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = gm204_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+       .base.vblank = &nvd0_disp_vblank_func,
+       .base.outp =  gm204_disp_outp_sclass,
+       .mthd.core = &nve0_disp_core_mthd_chan,
+       .mthd.base = &nvd0_disp_base_mthd_chan,
+       .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+       .mthd.prev = -0x020000,
+       .head.scanoutpos = nvd0_disp_main_scanoutpos,
+}.base.base;
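A trimmed sketch, outside the patch, of the per-chip wiring gm204_disp_ctor does above: the head count is read from register 0x022448, GM204 gets 3 DACs and 4 SORs, and the new sor.magic pre-train hook is installed. The struct below is an illustrative stand-in for nv50_disp_priv.

#include <stdio.h>

struct disp_cfg {
	int heads, dacs, sors;
	void (*sor_magic)(int sor);
};

static void sor_magic_stub(int sor)  /* stand-in for gm204_sor_magic */
{
	printf("route sublinks for SOR%d\n", sor);
}

int main(void)
{
	struct disp_cfg cfg = {
		.heads = 4,  /* in the driver: nv_rd32(parent, 0x022448) */
		.dacs  = 3,
		.sors  = 4,
		.sor_magic = sor_magic_stub,
	};

	cfg.sor_magic(0);
	printf("%d heads, %d DACs, %d SORs\n", cfg.heads, cfg.dacs, cfg.sors);
	return 0;
}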
index 2df3a937037de7c55d94f345bcdd8f2eeb88576e..44a8290aaea5cf2a0e7358cfa48cb9e7ad11dc27 100644 (file)
@@ -88,12 +88,14 @@ nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
 {
        struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
        nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
+       nv_wr32(priv, 0x610020, 0x00000001 << index);
 }
 
 static void
 nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
 {
        struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+       nv_wr32(priv, 0x610020, 0x00000001 << index);
        nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
 }
 
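A sketch of the acknowledge-before-toggle sequence added in the hunk above: clearing the pending bit in 0x610020 before flipping the mask in 0x610028 keeps a stale completion from before the channel was enabled from firing the moment the interrupt is unmasked. The register helpers are print stubs standing in for nv_wr32/nv_mask.

#include <stdint.h>
#include <stdio.h>

static void wr32(uint32_t addr, uint32_t data)
{
	printf("wr32  %#08x <- %#010x\n", addr, data);
}

static void mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	printf("mask  %#08x: %#010x -> %#010x\n", addr, mask, data);
}

static void uevent_init(int index)
{
	wr32(0x610020, 1u << index);                /* ack any stale pending bit */
	mask32(0x610028, 1u << index, 1u << index); /* then unmask */
}

static void uevent_fini(int index)
{
	mask32(0x610028, 1u << index, 0);           /* mask first */
	wr32(0x610020, 1u << index);                /* then clear pending */
}

int main(void)
{
	uevent_init(3);
	uevent_fini(3);
	return 0;
}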
@@ -374,7 +376,7 @@ nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
 }
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_base = {
+nv50_disp_core_mthd_base = {
        .mthd = 0x0000,
        .addr = 0x000000,
        .data = {
@@ -387,7 +389,7 @@ nv50_disp_mast_mthd_base = {
 };
 
 static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_dac = {
+nv50_disp_core_mthd_dac = {
        .mthd = 0x0080,
        .addr = 0x000008,
        .data = {
@@ -399,7 +401,7 @@ nv50_disp_mast_mthd_dac = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_sor = {
+nv50_disp_core_mthd_sor = {
        .mthd = 0x0040,
        .addr = 0x000008,
        .data = {
@@ -409,7 +411,7 @@ nv50_disp_mast_mthd_sor = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_pior = {
+nv50_disp_core_mthd_pior = {
        .mthd = 0x0040,
        .addr = 0x000008,
        .data = {
@@ -419,7 +421,7 @@ nv50_disp_mast_mthd_pior = {
 };
 
 static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_head = {
+nv50_disp_core_mthd_head = {
        .mthd = 0x0400,
        .addr = 0x000540,
        .data = {
@@ -466,21 +468,21 @@ nv50_disp_mast_mthd_head = {
 };
 
 static const struct nv50_disp_mthd_chan
-nv50_disp_mast_mthd_chan = {
+nv50_disp_core_mthd_chan = {
        .name = "Core",
        .addr = 0x000000,
        .data = {
-               { "Global", 1, &nv50_disp_mast_mthd_base },
-               {    "DAC", 3, &nv50_disp_mast_mthd_dac  },
-               {    "SOR", 2, &nv50_disp_mast_mthd_sor  },
-               {   "PIOR", 3, &nv50_disp_mast_mthd_pior },
-               {   "HEAD", 2, &nv50_disp_mast_mthd_head },
+               { "Global", 1, &nv50_disp_core_mthd_base },
+               {    "DAC", 3, &nv50_disp_core_mthd_dac  },
+               {    "SOR", 2, &nv50_disp_core_mthd_sor  },
+               {   "PIOR", 3, &nv50_disp_core_mthd_pior },
+               {   "HEAD", 2, &nv50_disp_core_mthd_head },
                {}
        }
 };
 
 int
-nv50_disp_mast_ctor(struct nouveau_object *parent,
+nv50_disp_core_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass, void *data, u32 size,
                    struct nouveau_object **pobject)
@@ -509,7 +511,7 @@ nv50_disp_mast_ctor(struct nouveau_object *parent,
 }
 
 static int
-nv50_disp_mast_init(struct nouveau_object *object)
+nv50_disp_core_init(struct nouveau_object *object)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_dmac *mast = (void *)object;
@@ -546,7 +548,7 @@ nv50_disp_mast_init(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_core_fini(struct nouveau_object *object, bool suspend)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_dmac *mast = (void *)object;
@@ -567,11 +569,11 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nv50_disp_chan_impl
-nv50_disp_mast_ofuncs = {
-       .base.ctor = nv50_disp_mast_ctor,
+nv50_disp_core_ofuncs = {
+       .base.ctor = nv50_disp_core_ctor,
        .base.dtor = nv50_disp_dmac_dtor,
-       .base.init = nv50_disp_mast_init,
-       .base.fini = nv50_disp_mast_fini,
+       .base.init = nv50_disp_core_init,
+       .base.fini = nv50_disp_core_fini,
        .base.map  = nv50_disp_chan_map,
        .base.ntfy = nv50_disp_chan_ntfy,
        .base.rd32 = nv50_disp_chan_rd32,
@@ -586,7 +588,7 @@ nv50_disp_mast_ofuncs = {
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_base = {
+nv50_disp_base_mthd_base = {
        .mthd = 0x0000,
        .addr = 0x000000,
        .data = {
@@ -611,7 +613,7 @@ nv50_disp_sync_mthd_base = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_image = {
+nv50_disp_base_mthd_image = {
        .mthd = 0x0400,
        .addr = 0x000000,
        .data = {
@@ -625,18 +627,18 @@ nv50_disp_sync_mthd_image = {
 };
 
 static const struct nv50_disp_mthd_chan
-nv50_disp_sync_mthd_chan = {
+nv50_disp_base_mthd_chan = {
        .name = "Base",
        .addr = 0x000540,
        .data = {
-               { "Global", 1, &nv50_disp_sync_mthd_base },
-               {  "Image", 2, &nv50_disp_sync_mthd_image },
+               { "Global", 1, &nv50_disp_base_mthd_base },
+               {  "Image", 2, &nv50_disp_base_mthd_image },
                {}
        }
 };
 
 int
-nv50_disp_sync_ctor(struct nouveau_object *parent,
+nv50_disp_base_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass, void *data, u32 size,
                    struct nouveau_object **pobject)
@@ -669,8 +671,8 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
 }
 
 struct nv50_disp_chan_impl
-nv50_disp_sync_ofuncs = {
-       .base.ctor = nv50_disp_sync_ctor,
+nv50_disp_base_ofuncs = {
+       .base.ctor = nv50_disp_base_ctor,
        .base.dtor = nv50_disp_dmac_dtor,
        .base.init = nv50_disp_dmac_init,
        .base.fini = nv50_disp_dmac_fini,
@@ -942,7 +944,7 @@ nv50_disp_curs_ofuncs = {
  ******************************************************************************/
 
 int
-nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
 {
        const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
        const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
@@ -974,7 +976,7 @@ nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
 }
 
 int
-nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
                    void *data, u32 size)
 {
        const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
@@ -1098,7 +1100,7 @@ nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
 }
 
 int
-nv50_disp_base_ctor(struct nouveau_object *parent,
+nv50_disp_main_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass, void *data, u32 size,
                    struct nouveau_object **pobject)
@@ -1118,7 +1120,7 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
 }
 
 void
-nv50_disp_base_dtor(struct nouveau_object *object)
+nv50_disp_main_dtor(struct nouveau_object *object)
 {
        struct nv50_disp_base *base = (void *)object;
        nouveau_ramht_ref(NULL, &base->ramht);
@@ -1126,7 +1128,7 @@ nv50_disp_base_dtor(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_base_init(struct nouveau_object *object)
+nv50_disp_main_init(struct nouveau_object *object)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_base *base = (void *)object;
@@ -1194,7 +1196,7 @@ nv50_disp_base_init(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_base *base = (void *)object;
@@ -1207,25 +1209,25 @@ nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nouveau_ofuncs
-nv50_disp_base_ofuncs = {
-       .ctor = nv50_disp_base_ctor,
-       .dtor = nv50_disp_base_dtor,
-       .init = nv50_disp_base_init,
-       .fini = nv50_disp_base_fini,
-       .mthd = nv50_disp_base_mthd,
+nv50_disp_main_ofuncs = {
+       .ctor = nv50_disp_main_ctor,
+       .dtor = nv50_disp_main_dtor,
+       .init = nv50_disp_main_init,
+       .fini = nv50_disp_main_fini,
+       .mthd = nv50_disp_main_mthd,
        .ntfy = nouveau_disp_ntfy,
 };
 
 static struct nouveau_oclass
-nv50_disp_base_oclass[] = {
-       { NV50_DISP, &nv50_disp_base_ofuncs },
+nv50_disp_main_oclass[] = {
+       { NV50_DISP, &nv50_disp_main_ofuncs },
        {}
 };
 
 static struct nouveau_oclass
 nv50_disp_sclass[] = {
-       { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-       { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+       { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+       { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
        { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
        { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
        { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -1974,7 +1976,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nv50_disp_base_oclass;
+       nv_engine(priv)->sclass = nv50_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
        INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -2007,9 +2009,9 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nv50_disp_vblank_func,
        .base.outp =  nv50_disp_outp_sclass,
-       .mthd.core = &nv50_disp_mast_mthd_chan,
-       .mthd.base = &nv50_disp_sync_mthd_chan,
+       .mthd.core = &nv50_disp_core_mthd_chan,
+       .mthd.base = &nv50_disp_base_mthd_chan,
        .mthd.ovly = &nv50_disp_ovly_mthd_chan,
        .mthd.prev = 0x000004,
-       .head.scanoutpos = nv50_disp_base_scanoutpos,
+       .head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
index 5279feefec062210706e087b315e802a84648913..7f08078ee9253fc41b73fe0a8b682359dca1edc4 100644 (file)
@@ -42,6 +42,7 @@ struct nv50_disp_priv {
                int (*hda_eld)(NV50_DISP_MTHD_V1);
                int (*hdmi)(NV50_DISP_MTHD_V1);
                u32 lvdsconf;
+               void (*magic)(struct nvkm_output *);
        } sor;
        struct {
                int nr;
@@ -63,10 +64,10 @@ struct nv50_disp_impl {
        } head;
 };
 
-int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_main_mthd(struct nouveau_object *, u32, void *, u32);
 
-int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
 
 int nv50_dac_power(NV50_DISP_MTHD_V1);
 int nv50_dac_sense(NV50_DISP_MTHD_V1);
@@ -169,18 +170,18 @@ struct nv50_disp_mthd_chan {
        } data[];
 };
 
-extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
-int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
+int nv50_disp_core_ctor(struct nouveau_object *, struct nouveau_object *,
                        struct nouveau_oclass *, void *, u32,
                        struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
-int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
                        struct nouveau_oclass *, void *, u32,
                        struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
 extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
 int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
                        struct nouveau_oclass *, void *, u32,
@@ -194,12 +195,12 @@ extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
 int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
                        struct nouveau_oclass *, void *, u32,
                        struct nouveau_object **);
-extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
-int  nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nouveau_ofuncs nv50_disp_main_ofuncs;
+int  nv50_disp_main_ctor(struct nouveau_object *, struct nouveau_object *,
                         struct nouveau_oclass *, void *, u32,
                         struct nouveau_object **);
-void nv50_disp_base_dtor(struct nouveau_object *);
-extern struct nouveau_omthds nv50_disp_base_omthds[];
+void nv50_disp_main_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_main_omthds[];
 extern struct nouveau_oclass nv50_disp_cclass;
 void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
                         const struct nv50_disp_mthd_chan *);
@@ -207,31 +208,31 @@ void nv50_disp_intr_supervisor(struct work_struct *);
 void nv50_disp_intr(struct nouveau_subdev *);
 extern const struct nvkm_event_func nv50_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
-extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_base_mthd_chan;
 extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
 
-extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv94_disp_core_mthd_chan;
 
-extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_core_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nvd0_disp_base_ofuncs;
 extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nvd0_disp_base_mthd_chan;
 extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
 extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_main_ofuncs;
 extern struct nouveau_oclass nvd0_disp_cclass;
 void nvd0_disp_intr_supervisor(struct work_struct *);
 void nvd0_disp_intr(struct nouveau_subdev *);
 extern const struct nvkm_event_func nvd0_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_core_mthd_chan;
 extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
 
 extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
@@ -242,6 +243,10 @@ int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 extern struct nouveau_oclass *nv94_disp_outp_sclass[];
 
 extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+int nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
 extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
 
+void gm204_sor_magic(struct nvkm_output *outp);
+extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
 #endif
index d36284715b2ab0f20ae26bce925fe05b3b1bea67..13eff5e4ee515e638070374ca1615acb095b7120 100644 (file)
@@ -34,7 +34,7 @@
  ******************************************************************************/
 
 const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_dac = {
+nv84_disp_core_mthd_dac = {
        .mthd = 0x0080,
        .addr = 0x000008,
        .data = {
@@ -46,7 +46,7 @@ nv84_disp_mast_mthd_dac = {
 };
 
 const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_head = {
+nv84_disp_core_mthd_head = {
        .mthd = 0x0400,
        .addr = 0x000540,
        .data = {
@@ -98,15 +98,15 @@ nv84_disp_mast_mthd_head = {
 };
 
 const struct nv50_disp_mthd_chan
-nv84_disp_mast_mthd_chan = {
+nv84_disp_core_mthd_chan = {
        .name = "Core",
        .addr = 0x000000,
        .data = {
-               { "Global", 1, &nv50_disp_mast_mthd_base },
-               {    "DAC", 3, &nv84_disp_mast_mthd_dac  },
-               {    "SOR", 2, &nv50_disp_mast_mthd_sor  },
-               {   "PIOR", 3, &nv50_disp_mast_mthd_pior },
-               {   "HEAD", 2, &nv84_disp_mast_mthd_head },
+               { "Global", 1, &nv50_disp_core_mthd_base },
+               {    "DAC", 3, &nv84_disp_core_mthd_dac  },
+               {    "SOR", 2, &nv50_disp_core_mthd_sor  },
+               {   "PIOR", 3, &nv50_disp_core_mthd_pior },
+               {   "HEAD", 2, &nv84_disp_core_mthd_head },
                {}
        }
 };
@@ -116,7 +116,7 @@ nv84_disp_mast_mthd_chan = {
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nv84_disp_sync_mthd_base = {
+nv84_disp_base_mthd_base = {
        .mthd = 0x0000,
        .addr = 0x000000,
        .data = {
@@ -146,12 +146,12 @@ nv84_disp_sync_mthd_base = {
 };
 
 const struct nv50_disp_mthd_chan
-nv84_disp_sync_mthd_chan = {
+nv84_disp_base_mthd_chan = {
        .name = "Base",
        .addr = 0x000540,
        .data = {
-               { "Global", 1, &nv84_disp_sync_mthd_base },
-               {  "Image", 2, &nv50_disp_sync_mthd_image },
+               { "Global", 1, &nv84_disp_base_mthd_base },
+               {  "Image", 2, &nv50_disp_base_mthd_image },
                {}
        }
 };
@@ -204,8 +204,8 @@ nv84_disp_ovly_mthd_chan = {
 
 static struct nouveau_oclass
 nv84_disp_sclass[] = {
-       { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-       { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+       { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+       { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
        { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
        { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
        { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -213,8 +213,8 @@ nv84_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nv84_disp_base_oclass[] = {
-       { G82_DISP, &nv50_disp_base_ofuncs },
+nv84_disp_main_oclass[] = {
+       { G82_DISP, &nv50_disp_main_ofuncs },
        {}
 };
 
@@ -240,7 +240,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nv84_disp_base_oclass;
+       nv_engine(priv)->sclass = nv84_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
        INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -268,9 +268,9 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nv50_disp_vblank_func,
        .base.outp =  nv50_disp_outp_sclass,
-       .mthd.core = &nv84_disp_mast_mthd_chan,
-       .mthd.base = &nv84_disp_sync_mthd_chan,
+       .mthd.core = &nv84_disp_core_mthd_chan,
+       .mthd.base = &nv84_disp_base_mthd_chan,
        .mthd.ovly = &nv84_disp_ovly_mthd_chan,
        .mthd.prev = 0x000004,
-       .head.scanoutpos = nv50_disp_base_scanoutpos,
+       .head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
index a117064002b1413a499a5a644ebd0c439abcdf75..2bb7ac5cd0e65d22fbce59dbfd9e1a915790cd26 100644 (file)
@@ -34,7 +34,7 @@
  ******************************************************************************/
 
 const struct nv50_disp_mthd_list
-nv94_disp_mast_mthd_sor = {
+nv94_disp_core_mthd_sor = {
        .mthd = 0x0040,
        .addr = 0x000008,
        .data = {
@@ -44,15 +44,15 @@ nv94_disp_mast_mthd_sor = {
 };
 
 const struct nv50_disp_mthd_chan
-nv94_disp_mast_mthd_chan = {
+nv94_disp_core_mthd_chan = {
        .name = "Core",
        .addr = 0x000000,
        .data = {
-               { "Global", 1, &nv50_disp_mast_mthd_base },
-               {    "DAC", 3, &nv84_disp_mast_mthd_dac  },
-               {    "SOR", 4, &nv94_disp_mast_mthd_sor  },
-               {   "PIOR", 3, &nv50_disp_mast_mthd_pior },
-               {   "HEAD", 2, &nv84_disp_mast_mthd_head },
+               { "Global", 1, &nv50_disp_core_mthd_base },
+               {    "DAC", 3, &nv84_disp_core_mthd_dac  },
+               {    "SOR", 4, &nv94_disp_core_mthd_sor  },
+               {   "PIOR", 3, &nv50_disp_core_mthd_pior },
+               {   "HEAD", 2, &nv84_disp_core_mthd_head },
                {}
        }
 };
@@ -63,8 +63,8 @@ nv94_disp_mast_mthd_chan = {
 
 static struct nouveau_oclass
 nv94_disp_sclass[] = {
-       { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-       { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+       { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+       { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
        { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
        { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
        { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -72,8 +72,8 @@ nv94_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nv94_disp_base_oclass[] = {
-       { GT206_DISP, &nv50_disp_base_ofuncs },
+nv94_disp_main_oclass[] = {
+       { GT206_DISP, &nv50_disp_main_ofuncs },
        {}
 };
 
@@ -99,7 +99,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nv94_disp_base_oclass;
+       nv_engine(priv)->sclass = nv94_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
        INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -134,9 +134,9 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nv50_disp_vblank_func,
        .base.outp =  nv94_disp_outp_sclass,
-       .mthd.core = &nv94_disp_mast_mthd_chan,
-       .mthd.base = &nv84_disp_sync_mthd_chan,
+       .mthd.core = &nv94_disp_core_mthd_chan,
+       .mthd.base = &nv84_disp_base_mthd_chan,
        .mthd.ovly = &nv84_disp_ovly_mthd_chan,
        .mthd.prev = 0x000004,
-       .head.scanoutpos = nv50_disp_base_scanoutpos,
+       .head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
index c67e68aadd45987ae1f5dc6fb0a48a73102b1fcc..b32456c9494fcf9432de07ca6de65b5c236bd4a8 100644 (file)
@@ -80,8 +80,8 @@ nva0_disp_ovly_mthd_chan = {
 
 static struct nouveau_oclass
 nva0_disp_sclass[] = {
-       { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-       { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+       { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+       { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
        { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
        { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
        { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -89,8 +89,8 @@ nva0_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nva0_disp_base_oclass[] = {
-       { GT200_DISP, &nv50_disp_base_ofuncs },
+nva0_disp_main_oclass[] = {
+       { GT200_DISP, &nv50_disp_main_ofuncs },
        {}
 };
 
@@ -116,7 +116,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nva0_disp_base_oclass;
+       nv_engine(priv)->sclass = nva0_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
        INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -144,9 +144,9 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nv50_disp_vblank_func,
        .base.outp =  nv50_disp_outp_sclass,
-       .mthd.core = &nv84_disp_mast_mthd_chan,
-       .mthd.base = &nv84_disp_sync_mthd_chan,
+       .mthd.core = &nv84_disp_core_mthd_chan,
+       .mthd.base = &nv84_disp_base_mthd_chan,
        .mthd.ovly = &nva0_disp_ovly_mthd_chan,
        .mthd.prev = 0x000004,
-       .head.scanoutpos = nv50_disp_base_scanoutpos,
+       .head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
index 22969f355aae66da0102bf722a87cbff81b529fc..951d79f9b781974bedfa39376464ced4731ef27c 100644 (file)
@@ -35,8 +35,8 @@
 
 static struct nouveau_oclass
 nva3_disp_sclass[] = {
-       { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-       { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+       { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+       { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
        { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
        { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
        { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nva3_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nva3_disp_base_oclass[] = {
-       { GT214_DISP, &nv50_disp_base_ofuncs },
+nva3_disp_main_oclass[] = {
+       { GT214_DISP, &nv50_disp_main_ofuncs },
        {}
 };
 
@@ -71,7 +71,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nva3_disp_base_oclass;
+       nv_engine(priv)->sclass = nva3_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
        INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -100,9 +100,9 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nv50_disp_vblank_func,
        .base.outp =  nv94_disp_outp_sclass,
-       .mthd.core = &nv94_disp_mast_mthd_chan,
-       .mthd.base = &nv84_disp_sync_mthd_chan,
+       .mthd.core = &nv94_disp_core_mthd_chan,
+       .mthd.base = &nv84_disp_base_mthd_chan,
        .mthd.ovly = &nv84_disp_ovly_mthd_chan,
        .mthd.prev = 0x000004,
-       .head.scanoutpos = nv50_disp_base_scanoutpos,
+       .head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
index 747e64bb9c06c935e1318a57ac7d8b879b473366..181a2d57e356c8570ec854ac5bd7b87d6cf506e4 100644 (file)
@@ -51,12 +51,14 @@ nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
 {
        struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
        nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
+       nv_wr32(priv, 0x61008c, 0x00000001 << index);
 }
 
 static void
 nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
 {
        struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+       nv_wr32(priv, 0x61008c, 0x00000001 << index);
        nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
 }
 
@@ -151,7 +153,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
  ******************************************************************************/
 
 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_base = {
+nvd0_disp_core_mthd_base = {
        .mthd = 0x0000,
        .addr = 0x000000,
        .data = {
@@ -164,7 +166,7 @@ nvd0_disp_mast_mthd_base = {
 };
 
 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_dac = {
+nvd0_disp_core_mthd_dac = {
        .mthd = 0x0020,
        .addr = 0x000020,
        .data = {
@@ -177,7 +179,7 @@ nvd0_disp_mast_mthd_dac = {
 };
 
 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_sor = {
+nvd0_disp_core_mthd_sor = {
        .mthd = 0x0020,
        .addr = 0x000020,
        .data = {
@@ -190,7 +192,7 @@ nvd0_disp_mast_mthd_sor = {
 };
 
 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_pior = {
+nvd0_disp_core_mthd_pior = {
        .mthd = 0x0020,
        .addr = 0x000020,
        .data = {
@@ -203,7 +205,7 @@ nvd0_disp_mast_mthd_pior = {
 };
 
 static const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_head = {
+nvd0_disp_core_mthd_head = {
        .mthd = 0x0300,
        .addr = 0x000300,
        .data = {
@@ -277,21 +279,21 @@ nvd0_disp_mast_mthd_head = {
 };
 
 static const struct nv50_disp_mthd_chan
-nvd0_disp_mast_mthd_chan = {
+nvd0_disp_core_mthd_chan = {
        .name = "Core",
        .addr = 0x000000,
        .data = {
-               { "Global", 1, &nvd0_disp_mast_mthd_base },
-               {    "DAC", 3, &nvd0_disp_mast_mthd_dac  },
-               {    "SOR", 8, &nvd0_disp_mast_mthd_sor  },
-               {   "PIOR", 4, &nvd0_disp_mast_mthd_pior },
-               {   "HEAD", 4, &nvd0_disp_mast_mthd_head },
+               { "Global", 1, &nvd0_disp_core_mthd_base },
+               {    "DAC", 3, &nvd0_disp_core_mthd_dac  },
+               {    "SOR", 8, &nvd0_disp_core_mthd_sor  },
+               {   "PIOR", 4, &nvd0_disp_core_mthd_pior },
+               {   "HEAD", 4, &nvd0_disp_core_mthd_head },
                {}
        }
 };
 
 static int
-nvd0_disp_mast_init(struct nouveau_object *object)
+nvd0_disp_core_init(struct nouveau_object *object)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_dmac *mast = (void *)object;
@@ -322,7 +324,7 @@ nvd0_disp_mast_init(struct nouveau_object *object)
 }
 
 static int
-nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_dmac *mast = (void *)object;
@@ -344,11 +346,11 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nv50_disp_chan_impl
-nvd0_disp_mast_ofuncs = {
-       .base.ctor = nv50_disp_mast_ctor,
+nvd0_disp_core_ofuncs = {
+       .base.ctor = nv50_disp_core_ctor,
        .base.dtor = nv50_disp_dmac_dtor,
-       .base.init = nvd0_disp_mast_init,
-       .base.fini = nvd0_disp_mast_fini,
+       .base.init = nvd0_disp_core_init,
+       .base.fini = nvd0_disp_core_fini,
        .base.ntfy = nv50_disp_chan_ntfy,
        .base.map  = nv50_disp_chan_map,
        .base.rd32 = nv50_disp_chan_rd32,
@@ -363,7 +365,7 @@ nvd0_disp_mast_ofuncs = {
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_base = {
+nvd0_disp_base_mthd_base = {
        .mthd = 0x0000,
        .addr = 0x000000,
        .data = {
@@ -413,7 +415,7 @@ nvd0_disp_sync_mthd_base = {
 };
 
 static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_image = {
+nvd0_disp_base_mthd_image = {
        .mthd = 0x0400,
        .addr = 0x000400,
        .data = {
@@ -427,19 +429,19 @@ nvd0_disp_sync_mthd_image = {
 };
 
 const struct nv50_disp_mthd_chan
-nvd0_disp_sync_mthd_chan = {
+nvd0_disp_base_mthd_chan = {
        .name = "Base",
        .addr = 0x001000,
        .data = {
-               { "Global", 1, &nvd0_disp_sync_mthd_base },
-               {  "Image", 2, &nvd0_disp_sync_mthd_image },
+               { "Global", 1, &nvd0_disp_base_mthd_base },
+               {  "Image", 2, &nvd0_disp_base_mthd_image },
                {}
        }
 };
 
 struct nv50_disp_chan_impl
-nvd0_disp_sync_ofuncs = {
-       .base.ctor = nv50_disp_sync_ctor,
+nvd0_disp_base_ofuncs = {
+       .base.ctor = nv50_disp_base_ctor,
        .base.dtor = nv50_disp_dmac_dtor,
        .base.init = nvd0_disp_dmac_init,
        .base.fini = nvd0_disp_dmac_fini,
@@ -624,7 +626,7 @@ nvd0_disp_curs_ofuncs = {
  ******************************************************************************/
 
 int
-nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
 {
        const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
        const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
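A sketch of the per-head register addressing used by the scanout position reads above: on nvd0-class display each head owns a 0x300-byte register window, so a head-relative offset is simply base + head * 0x300. The helper is illustrative; the offsets are taken from the hunk.

#include <stdint.h>
#include <stdio.h>

static uint32_t head_reg(uint32_t base, int head)
{
	return base + (uint32_t)head * 0x300;
}

int main(void)
{
	printf("head 0 total:  %#x\n", head_reg(0x640414, 0));
	printf("head 1 total:  %#x\n", head_reg(0x640414, 1));  /* 0x640714 */
	printf("head 1 blanke: %#x\n", head_reg(0x64041c, 1));  /* 0x64071c */
	return 0;
}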
@@ -656,7 +658,7 @@ nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
 }
 
 static int
-nvd0_disp_base_init(struct nouveau_object *object)
+nvd0_disp_main_init(struct nouveau_object *object)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_base *base = (void *)object;
@@ -725,7 +727,7 @@ nvd0_disp_base_init(struct nouveau_object *object)
 }
 
 static int
-nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
 {
        struct nv50_disp_priv *priv = (void *)object->engine;
        struct nv50_disp_base *base = (void *)object;
@@ -737,25 +739,25 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nouveau_ofuncs
-nvd0_disp_base_ofuncs = {
-       .ctor = nv50_disp_base_ctor,
-       .dtor = nv50_disp_base_dtor,
-       .init = nvd0_disp_base_init,
-       .fini = nvd0_disp_base_fini,
-       .mthd = nv50_disp_base_mthd,
+nvd0_disp_main_ofuncs = {
+       .ctor = nv50_disp_main_ctor,
+       .dtor = nv50_disp_main_dtor,
+       .init = nvd0_disp_main_init,
+       .fini = nvd0_disp_main_fini,
+       .mthd = nv50_disp_main_mthd,
        .ntfy = nouveau_disp_ntfy,
 };
 
 static struct nouveau_oclass
-nvd0_disp_base_oclass[] = {
-       { GF110_DISP, &nvd0_disp_base_ofuncs },
+nvd0_disp_main_oclass[] = {
+       { GF110_DISP, &nvd0_disp_main_ofuncs },
        {}
 };
 
 static struct nouveau_oclass
 nvd0_disp_sclass[] = {
-       { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-       { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+       { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+       { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
        { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
        { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
        { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -1055,6 +1057,9 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
 
                if (nvkm_output_dp_train(outp, pclk, true))
                        ERR("link not trained before attach\n");
+       } else {
+               if (priv->sor.magic)
+                       priv->sor.magic(outp);
        }
 
        exec_clkcmp(priv, head, 0, pclk, &conf);
@@ -1063,10 +1068,18 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
                addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
                data = 0x00000000;
        } else {
-               if (outp->info.type == DCB_OUTPUT_DP)
-                       nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
                addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
                data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+               switch (outp->info.type) {
+               case DCB_OUTPUT_TMDS:
+                       nv_mask(priv, addr, 0x007c0000, 0x00280000);
+                       break;
+               case DCB_OUTPUT_DP:
+                       nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+                       break;
+               default:
+                       break;
+               }
        }
 
        nv_mask(priv, addr, 0x00000707, data);
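A sketch of the restructure above: the lone DisplayPort special case becomes a switch so TMDS can gain its own register tweak. The DCB type values are the standard ones used by the driver (TMDS = 2, DP = 6); the handlers are print stubs.

#include <stdio.h>

enum { DCB_OUTPUT_TMDS = 2, DCB_OUTPUT_DP = 6 };

static void tmds_tweak(void) { printf("mask 0x007c0000 -> 0x00280000\n"); }
static void dp_set_tu(void)  { printf("program DP transfer unit\n"); }

static void handle_output(int type)
{
	switch (type) {
	case DCB_OUTPUT_TMDS:
		tmds_tweak();
		break;
	case DCB_OUTPUT_DP:
		dp_set_tu();
		break;
	default:
		break;
	}
}

int main(void)
{
	handle_output(DCB_OUTPUT_TMDS);
	handle_output(DCB_OUTPUT_DP);
	return 0;
}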
@@ -1259,7 +1272,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+       nv_engine(priv)->sclass = nvd0_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
        INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -1292,9 +1305,9 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nvd0_disp_vblank_func,
        .base.outp =  nvd0_disp_outp_sclass,
-       .mthd.core = &nvd0_disp_mast_mthd_chan,
-       .mthd.base = &nvd0_disp_sync_mthd_chan,
+       .mthd.core = &nvd0_disp_core_mthd_chan,
+       .mthd.base = &nvd0_disp_base_mthd_chan,
        .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
        .mthd.prev = -0x020000,
-       .head.scanoutpos = nvd0_disp_base_scanoutpos,
+       .head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
index db144b2cf06bd7a98df12c59a718a608ba5ef27b..55debec7e68f7d7b1009f80864dd3833ae8674d1 100644 (file)
@@ -34,7 +34,7 @@
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nve0_disp_mast_mthd_head = {
+nve0_disp_core_mthd_head = {
        .mthd = 0x0300,
        .addr = 0x000300,
        .data = {
@@ -113,15 +113,15 @@ nve0_disp_mast_mthd_head = {
 };
 
 const struct nv50_disp_mthd_chan
-nve0_disp_mast_mthd_chan = {
+nve0_disp_core_mthd_chan = {
        .name = "Core",
        .addr = 0x000000,
        .data = {
-               { "Global", 1, &nvd0_disp_mast_mthd_base },
-               {    "DAC", 3, &nvd0_disp_mast_mthd_dac  },
-               {    "SOR", 8, &nvd0_disp_mast_mthd_sor  },
-               {   "PIOR", 4, &nvd0_disp_mast_mthd_pior },
-               {   "HEAD", 4, &nve0_disp_mast_mthd_head },
+               { "Global", 1, &nvd0_disp_core_mthd_base },
+               {    "DAC", 3, &nvd0_disp_core_mthd_dac  },
+               {    "SOR", 8, &nvd0_disp_core_mthd_sor  },
+               {   "PIOR", 4, &nvd0_disp_core_mthd_pior },
+               {   "HEAD", 4, &nve0_disp_core_mthd_head },
                {}
        }
 };
@@ -200,8 +200,8 @@ nve0_disp_ovly_mthd_chan = {
 
 static struct nouveau_oclass
 nve0_disp_sclass[] = {
-       { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-       { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+       { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+       { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
        { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
        { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
        { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -209,8 +209,8 @@ nve0_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nve0_disp_base_oclass[] = {
-       { GK104_DISP, &nvd0_disp_base_ofuncs },
+nve0_disp_main_oclass[] = {
+       { GK104_DISP, &nvd0_disp_main_ofuncs },
        {}
 };
 
@@ -237,7 +237,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nve0_disp_base_oclass;
+       nv_engine(priv)->sclass = nve0_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
        INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -264,9 +264,9 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nvd0_disp_vblank_func,
        .base.outp =  nvd0_disp_outp_sclass,
-       .mthd.core = &nve0_disp_mast_mthd_chan,
-       .mthd.base = &nvd0_disp_sync_mthd_chan,
+       .mthd.core = &nve0_disp_core_mthd_chan,
+       .mthd.base = &nvd0_disp_base_mthd_chan,
        .mthd.ovly = &nve0_disp_ovly_mthd_chan,
        .mthd.prev = -0x020000,
-       .head.scanoutpos = nvd0_disp_base_scanoutpos,
+       .head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
index 402d7d67d806fa0cd81dd61c758c6e868dc4cf90..3e7e2d28744c48b8e6f725e19bbbf7343013f452 100644 (file)
@@ -35,8 +35,8 @@
 
 static struct nouveau_oclass
 nvf0_disp_sclass[] = {
-       { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-       { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+       { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+       { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
        { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
        { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
        { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nvf0_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nvf0_disp_base_oclass[] = {
-       { GK110_DISP, &nvd0_disp_base_ofuncs },
+nvf0_disp_main_oclass[] = {
+       { GK110_DISP, &nvd0_disp_main_ofuncs },
        {}
 };
 
@@ -72,7 +72,7 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+       nv_engine(priv)->sclass = nvf0_disp_main_oclass;
        nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
        INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
        },
        .base.vblank = &nvd0_disp_vblank_func,
        .base.outp =  nvd0_disp_outp_sclass,
-       .mthd.core = &nve0_disp_mast_mthd_chan,
-       .mthd.base = &nvd0_disp_sync_mthd_chan,
+       .mthd.core = &nve0_disp_core_mthd_chan,
+       .mthd.base = &nvd0_disp_base_mthd_chan,
        .mthd.ovly = &nve0_disp_ovly_mthd_chan,
        .mthd.prev = -0x020000,
-       .head.scanoutpos = nvd0_disp_base_scanoutpos,
+       .head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
index a5ff00a9cedc044ee83d9ae343a0ed1461a38f3e..bbd9b6fdc90f1744ccce55375afa66f0f320fef0 100644 (file)
@@ -85,7 +85,10 @@ nvkm_output_create_(struct nouveau_object *parent,
            dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
            dcbE->bus, dcbE->heads);
 
-       outp->port = i2c->find(i2c, outp->info.i2c_index);
+       if (outp->info.type != DCB_OUTPUT_DP)
+               outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
+       else
+               outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
        outp->edid = outp->port;
 
        data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
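A sketch of the port-selection change above: DisplayPort outputs now look up their auxiliary channel rather than a plain I2C port. The tag macros below only model the distinction; the real NV_I2C_PORT/NV_I2C_AUX encodings live in the i2c subdev headers.

#include <stdio.h>

#define NV_I2C_PORT(n) (0x00 + (n))  /* illustrative encoding */
#define NV_I2C_AUX(n)  (0x10 + (n))  /* illustrative encoding */

enum { DCB_OUTPUT_TMDS = 2, DCB_OUTPUT_DP = 6 };

static int port_for_output(int type, int i2c_index)
{
	if (type != DCB_OUTPUT_DP)
		return NV_I2C_PORT(i2c_index);
	return NV_I2C_AUX(i2c_index);
}

int main(void)
{
	printf("TMDS on index 2 -> %#x\n", port_for_output(DCB_OUTPUT_TMDS, 2));
	printf("DP   on index 2 -> %#x\n", port_for_output(DCB_OUTPUT_DP, 2));
	return 0;
}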
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
new file mode 100644 (file)
index 0000000..0b4fad3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+static inline u32
+gm204_sor_soff(struct nvkm_output_dp *outp)
+{
+       return (ffs(outp->base.info.or) - 1) * 0x800;
+}
+
+static inline u32
+gm204_sor_loff(struct nvkm_output_dp *outp)
+{
+       return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+}
+
+void
+gm204_sor_magic(struct nvkm_output *outp)
+{
+       struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+       const u32 soff = outp->or * 0x100;
+       const u32 data = outp->or + 1;
+       if (outp->info.sorconf.link & 1)
+               nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+       if (outp->info.sorconf.link & 2)
+               nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+}
+
+static inline u32
+gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+       return lane * 0x08;
+}
+
+static int
+gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+       struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+       const u32 soff = gm204_sor_soff(outp);
+       const u32 data = 0x01010101 * pattern;
+       if (outp->base.info.sorconf.link & 1)
+               nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+       else
+               nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+       return 0;
+}
+
+static int
+gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+       struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+       const u32 soff = gm204_sor_soff(outp);
+       const u32 loff = gm204_sor_loff(outp);
+       u32 mask = 0, i;
+
+       for (i = 0; i < nr; i++)
+               mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+
+       nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
+       nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
+       nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+       return 0;
+}
+
+static int
+gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+{
+       struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+       const u32 loff = gm204_sor_loff(outp);
+       u32 addr, data[4];
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout info;
+       struct nvbios_dpcfg ocfg;
+
+       addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+                                 outp->base.info.hashm,
+                                 &ver, &hdr, &cnt, &len, &info);
+       if (!addr)
+               return -ENODEV;
+
+       addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
+                                &ver, &hdr, &cnt, &len, &ocfg);
+       if (!addr)
+               return -EINVAL;
+
+       data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+       data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+       data[2] = nv_rd32(priv, 0x61c130 + loff);
+       if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+               data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+       nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+       nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+       nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+       data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
+       nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+       return 0;
+}
+
+struct nvkm_output_dp_impl
+gm204_sor_dp_impl = {
+       .base.base.handle = DCB_OUTPUT_DP,
+       .base.base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nvkm_output_dp_ctor,
+               .dtor = _nvkm_output_dp_dtor,
+               .init = _nvkm_output_dp_init,
+               .fini = _nvkm_output_dp_fini,
+       },
+       .pattern = gm204_sor_dp_pattern,
+       .lnk_pwr = gm204_sor_dp_lnk_pwr,
+       .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+       .drv_ctl = gm204_sor_dp_drv_ctl,
+};
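
A detail worth noting in gm204_sor_dp_pattern() above: the per-lane
training-pattern fields sit one byte apart in the register, so multiplying
the pattern code by 0x01010101 broadcasts it to all four lanes in a single
write, while the 0x0f0f0f0f mask leaves the other nibbles untouched. A
standalone illustration (the pattern values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint32_t pattern = 0; pattern <= 3; pattern++) {
		/* One byte per lane; the multiply replicates the code. */
		uint32_t data = 0x01010101u * pattern;
		printf("pattern %u -> data 0x%08x, mask 0x0f0f0f0f\n",
		       (unsigned)pattern, (unsigned)data);
	}
	return 0;
}
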
index 7b7bbc3e459e4eceafd2a476cca3a65a21254cdc..fdab2939070c8e441ce54c03178566bee695e6ed 100644 (file)
@@ -60,7 +60,7 @@ nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
        return 0;
 }
 
-static int
+int
 nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
        struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
index 3fc4f0b0eacae1b86c0b3507f10a801a24ae185b..19f5f652296204e72c412c1b627a7ae452cce75a 100644 (file)
@@ -51,6 +51,7 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
                case GK104_DISP_CORE_CHANNEL_DMA:
                case GK110_DISP_CORE_CHANNEL_DMA:
                case GM107_DISP_CORE_CHANNEL_DMA:
+               case GM204_DISP_CORE_CHANNEL_DMA:
                case GF110_DISP_BASE_CHANNEL_DMA:
                case GK104_DISP_BASE_CHANNEL_DMA:
                case GK110_DISP_BASE_CHANNEL_DMA:
index 5ae6a43893b5996c0e13981851c71ed30947c5ad..1931057f996205d68ca93c94805211536c31aa4d 100644 (file)
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                        }
 
                        if (status & 0x40000000) {
-                               nouveau_fifo_uevent(&priv->base);
                                nv_wr32(priv, 0x002100, 0x40000000);
+                               nouveau_fifo_uevent(&priv->base);
                                status &= ~0x40000000;
                        }
                }
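
The only change in this hunk is ordering: the 0x40000000 bit is now acked
in 0x002100 before nouveau_fifo_uevent() runs rather than after. One
plausible reading (not spelled out in the diff itself) is that work woken
by the event can make the hardware assert the same interrupt again, and
acking afterwards would silently wipe that new assertion. A toy model of
the two orderings:

#include <stdio.h>

static unsigned int pending;

/* Ack: clear the bit in the interrupt status register (0x002100). */
static void ack(void) { pending &= ~0x40000000; }

/* Signal: model waiters whose work makes the HW raise the bit again. */
static void signal_waiters(void) { pending |= 0x40000000; }

int main(void)
{
	pending = 0x40000000;
	signal_waiters();
	ack();
	printf("signal-then-ack: pending=0x%08x (re-assertion lost)\n", pending);

	pending = 0x40000000;
	ack();
	signal_waiters();
	printf("ack-then-signal: pending=0x%08x (re-assertion kept)\n", pending);
	return 0;
}

The same reorder appears again below in nve0_fifo_intr() for the engine
interrupt bit.
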
index 1fe1f8fbda0c0ab279d29630e7582fa48c607593..074d434c3077a53b74410b033a495ae58c3cd666 100644 (file)
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
        u32 inte = nv_rd32(priv, 0x002628);
        u32 unkn;
 
+       nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
        for (unkn = 0; unkn < 8; unkn++) {
                u32 ints = (intr >> (unkn * 0x04)) & inte;
                if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
                        nv_mask(priv, 0x002628, ints, 0);
                }
        }
-
-       nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }
 
 static void
index d2f0fd39c1453c82e905c42ecf00551df7125f69..6a8db7c80bd148e150e6c4497c43f5bedc6249fe 100644 (file)
@@ -792,7 +792,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
        nouveau_engctx_put(engctx);
 }
 
-static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
@@ -827,9 +827,10 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
 };
 
 static void
-nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
 {
-       u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+       u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
+       u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
        u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
        u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
        u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
@@ -840,11 +841,12 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
        if (stat & 0x00800000) {
                if (!nve0_fifo_swmthd(priv, chid, mthd, data))
                        show &= ~0x00800000;
+               nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
        }
 
        if (show) {
                nv_error(priv, "PBDMA%d:", unit);
-               nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
+               nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
                pr_cont("\n");
                nv_error(priv,
                         "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
@@ -853,10 +855,37 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
                         subc, mthd, data);
        }
 
-       nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
 }
 
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
+       { 0x00000001, "HCE_RE_ILLEGAL_OP" },
+       { 0x00000002, "HCE_RE_ALIGNB" },
+       { 0x00000004, "HCE_PRIV" },
+       { 0x00000008, "HCE_ILLEGAL_MTHD" },
+       { 0x00000010, "HCE_ILLEGAL_CLASS" },
+       {}
+};
+
+static void
+nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
+{
+       u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
+       u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
+       u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+
+       if (stat) {
+               nv_error(priv, "PBDMA%d:", unit);
+               nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
+               pr_cont("\n");
+               nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
+                        nv_rd32(priv, 0x040150 + (unit * 0x2000)),
+                        nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+       }
+
+       nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+}
+
 static void
 nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
 {
@@ -939,7 +968,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
                u32 mask = nv_rd32(priv, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
-                       nve0_fifo_intr_pbdma(priv, unit);
+                       nve0_fifo_intr_pbdma_0(priv, unit);
+                       nve0_fifo_intr_pbdma_1(priv, unit);
                        nv_wr32(priv, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
@@ -952,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
        }
 
        if (stat & 0x80000000) {
-               nve0_fifo_intr_engine(priv);
                nv_wr32(priv, 0x002100, 0x80000000);
+               nve0_fifo_intr_engine(priv);
                stat &= ~0x80000000;
        }
 
@@ -1022,6 +1052,12 @@ nve0_fifo_init(struct nouveau_object *object)
                nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }
 
+       /* PBDMA[n].HCE */
+       for (i = 0; i < priv->spoon_nr; i++) {
+               nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+               nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+       }
+
        nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
 
        nv_wr32(priv, 0x002100, 0xffffffff);
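
Two related changes in the hunks above: the PBDMA handlers now AND the raw
status with the matching INTREN register before reporting or acking
anything, and nve0_fifo_init() arms the new HCE unit (0x040148/0x04014c)
alongside the existing one. Masking first means a source that is pending
but not enabled is neither printed nor acked away. A standalone sketch
with invented register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stat = 0x00800101;	/* invented raw 0x040108 status */
	uint32_t mask = 0xfffffeff;	/* the INTREN value written to 0x04010c */

	uint32_t live = stat & mask;	/* only enabled sources get serviced */
	printf("serviced and acked: 0x%08x\n", (unsigned)live);
	printf("left pending (disabled): 0x%08x\n", (unsigned)(stat & ~mask));
	return 0;
}
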
index 30fd1dc64f9307603e137c43d5f6bf0e76e05d4f..17251e4b9e8647c7dd52b4bc7eebbe052a485259 100644 (file)
@@ -1557,7 +1557,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
                    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
                    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
-                       return -EINVAL;
+                       return -ENODEV;
                priv->firmware = true;
        }
 
index 1d9d893929bb3486b1c35229ac238ea89a883dbc..2ec2e50d3676607208ac8c1e5c5e58fc7ca9596a 100644 (file)
@@ -16,6 +16,7 @@ enum nv_subdev_type {
         * to during POST.
         */
        NVDEV_SUBDEV_DEVINIT,
+       NVDEV_SUBDEV_IBUS,
        NVDEV_SUBDEV_GPIO,
        NVDEV_SUBDEV_I2C,
        NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
@@ -31,7 +32,6 @@ enum nv_subdev_type {
        NVDEV_SUBDEV_TIMER,
        NVDEV_SUBDEV_FB,
        NVDEV_SUBDEV_LTC,
-       NVDEV_SUBDEV_IBUS,
        NVDEV_SUBDEV_INSTMEM,
        NVDEV_SUBDEV_VM,
        NVDEV_SUBDEV_BAR,
@@ -92,6 +92,7 @@ struct nouveau_device {
                GM100    = 0x110,
        } card_type;
        u32 chipset;
+       u8  chiprev;
        u32 crystal;
 
        struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
@@ -158,6 +159,12 @@ nv_device_is_pci(struct nouveau_device *device)
        return device->pdev != NULL;
 }
 
+static inline bool
+nv_device_is_cpu_coherent(struct nouveau_device *device)
+{
+       return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
+}
+
 static inline struct device *
 nv_device_base(struct nouveau_device *device)
 {
index ceb67d77087522cba3394837a7a2e18336db2872..d22a59138a9b5fd29af066eb885726e5c0ae84f0 100644 (file)
@@ -23,11 +23,6 @@ void nouveau_handle_destroy(struct nouveau_handle *);
 int  nouveau_handle_init(struct nouveau_handle *);
 int  nouveau_handle_fini(struct nouveau_handle *, bool suspend);
 
-int  nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
-                       u16 oclass, void *data, u32 size,
-                       struct nouveau_object **);
-int  nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
-
 struct nouveau_object *
 nouveau_handle_ref(struct nouveau_object *, u32 name);
 
index d7039482d6fd6cb035cba3ba4bb13cfd4a120d60..2e2afa502c990228b23f0227f69c0ade14354174 100644 (file)
@@ -203,21 +203,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
        return 0;
 }
 
-#include <core/handle.h>
-
-static inline int
-nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
-                  u16 oclass, void *data, u32 size,
-                  struct nouveau_object **pobject)
-{
-       return nouveau_handle_new(client, parent, handle, oclass,
-                                 data, size, pobject);
-}
-
-static inline int
-nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
-{
-       return nouveau_handle_del(client, parent, handle);
-}
-
 #endif
index 7a64f347b385d18d60eba08d58c969c0937d5853..fc307f1317ffaf7836edcd286cc3f8307b036973 100644 (file)
@@ -31,5 +31,6 @@ extern struct nouveau_oclass *nvd0_disp_oclass;
 extern struct nouveau_oclass *nve0_disp_oclass;
 extern struct nouveau_oclass *nvf0_disp_oclass;
 extern struct nouveau_oclass *gm107_disp_oclass;
+extern struct nouveau_oclass *gm204_disp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
new file mode 100644 (file)
index 0000000..1f84d36
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __NVBIOS_M0203_H__
+#define __NVBIOS_M0203_H__
+
+struct nvbios_M0203T {
+#define M0203T_TYPE_RAMCFG 0x00
+       u8  type;
+       u16 pointer;
+};
+
+u32 nvbios_M0203Te(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0203Tp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_M0203T *);
+
+struct nvbios_M0203E {
+#define M0203E_TYPE_DDR2  0x0
+#define M0203E_TYPE_DDR3  0x1
+#define M0203E_TYPE_GDDR3 0x2
+#define M0203E_TYPE_GDDR5 0x3
+#define M0203E_TYPE_SKIP  0xf
+       u8 type;
+       u8 strap;
+       u8 group;
+};
+
+u32 nvbios_M0203Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0203Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+                  struct nvbios_M0203E *);
+u32 nvbios_M0203Em(struct nouveau_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
+                  struct nvbios_M0203E *);
+
+#endif
index 10b57a19a7decb7f6a5604ed6ce5befe4ecd18e9..c9bb112895aff3ea8b86eaa6237040ea946b1b96 100644 (file)
@@ -4,11 +4,14 @@
 struct nouveau_bios;
 
 enum dcb_i2c_type {
-       DCB_I2C_NV04_BIT = 0,
-       DCB_I2C_NV4E_BIT = 4,
-       DCB_I2C_NVIO_BIT = 5,
-       DCB_I2C_NVIO_AUX = 6,
-       DCB_I2C_UNUSED = 0xff
+       /* matches bios type field prior to ccb 4.1 */
+       DCB_I2C_NV04_BIT = 0x00,
+       DCB_I2C_NV4E_BIT = 0x04,
+       DCB_I2C_NVIO_BIT = 0x05,
+       DCB_I2C_NVIO_AUX = 0x06,
+       /* made up - mostly */
+       DCB_I2C_PMGR     = 0x80,
+       DCB_I2C_UNUSED   = 0xff
 };
 
 struct dcb_i2c_entry {
@@ -16,6 +19,7 @@ struct dcb_i2c_entry {
        u8 drive;
        u8 sense;
        u8 share;
+       u8 auxch;
 };
 
 u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
new file mode 100644 (file)
index 0000000..3348b45
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __NVBIOS_IMAGE_H__
+#define __NVBIOS_IMAGE_H__
+
+struct nvbios_image {
+       u32  base;
+       u32  size;
+       u8   type;
+       bool last;
+};
+
+bool nvbios_image(struct nouveau_bios *, int, struct nvbios_image *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
new file mode 100644 (file)
index 0000000..b18413d
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __NVBIOS_NPDE_H__
+#define __NVBIOS_NPDE_H__
+
+struct nvbios_npdeT {
+       u32 image_size;
+       bool last;
+};
+
+u32 nvbios_npdeTe(struct nouveau_bios *, u32);
+u32 nvbios_npdeTp(struct nouveau_bios *, u32, struct nvbios_npdeT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
new file mode 100644 (file)
index 0000000..3d634a0
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __NVBIOS_PCIR_H__
+#define __NVBIOS_PCIR_H__
+
+struct nvbios_pcirT {
+       u16 vendor_id;
+       u16 device_id;
+       u8  class_code[3];
+       u32 image_size;
+       u16 image_rev;
+       u8  image_type;
+       bool last;
+};
+
+u32 nvbios_pcirTe(struct nouveau_bios *, u32, u8 *ver, u16 *hdr);
+u32 nvbios_pcirTp(struct nouveau_bios *, u32, u8 *ver, u16 *hdr,
+                 struct nvbios_pcirT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
new file mode 100644 (file)
index 0000000..9de593d
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef __NVBIOS_PMU_H__
+#define __NVBIOS_PMU_H__
+
+struct nvbios_pmuT {
+};
+
+u32 nvbios_pmuTe(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_pmuTp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                struct nvbios_pmuT *);
+
+struct nvbios_pmuE {
+       u8  type;
+       u32 data;
+};
+
+u32 nvbios_pmuEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_pmuEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+                struct nvbios_pmuE *);
+
+struct nvbios_pmuR {
+       u32 boot_addr_pmu;
+       u32 boot_addr;
+       u32 boot_size;
+       u32 code_addr_pmu;
+       u32 code_addr;
+       u32 code_size;
+       u32 init_addr_pmu;
+
+       u32 data_addr_pmu;
+       u32 data_addr;
+       u32 data_size;
+       u32 args_addr_pmu;
+};
+
+bool nvbios_pmuRm(struct nouveau_bios *, u8 type, struct nvbios_pmuR *);
+
+#endif
index a685bbd045680529e33d6d54e78a0359ab61c0cb..4a0e0ceb41baadfc7da271ffc11978c7426d8a17 100644 (file)
@@ -43,8 +43,9 @@ struct nvbios_ramcfg {
                        unsigned ramcfg_10_02_08:1;
                        unsigned ramcfg_10_02_10:1;
                        unsigned ramcfg_10_02_20:1;
-                       unsigned ramcfg_10_02_40:1;
+                       unsigned ramcfg_10_DLLoff:1;
                        unsigned ramcfg_10_03_0f:4;
+                       unsigned ramcfg_10_04_01:1;
                        unsigned ramcfg_10_05:8;
                        unsigned ramcfg_10_06:8;
                        unsigned ramcfg_10_07:8;
@@ -95,9 +96,29 @@ struct nvbios_ramcfg {
        union {
                struct {
                        unsigned timing_10_WR:8;
+                       unsigned timing_10_WTR:8;
                        unsigned timing_10_CL:8;
+                       unsigned timing_10_RC:8;
+                       /* empty: 4 */
+                       unsigned timing_10_RFC:8;        /* Byte 5 */
+                       /* empty: 6 */
+                       unsigned timing_10_RAS:8;        /* Byte 7 */
+                       /* empty: 8 */
+                       unsigned timing_10_RP:8;         /* Byte 9 */
+                       unsigned timing_10_RCDRD:8;
+                       unsigned timing_10_RCDWR:8;
+                       unsigned timing_10_RRD:8;
+                       unsigned timing_10_13:8;
                        unsigned timing_10_ODT:3;
+                       /* empty: 15 */
+                       unsigned timing_10_16:8;
+                       /* empty: 17 */
+                       unsigned timing_10_18:8;
                        unsigned timing_10_CWL:8;
+                       unsigned timing_10_20:8;
+                       unsigned timing_10_21:8;
+                       /* empty: 22, 23 */
+                       unsigned timing_10_24:8;
                };
                struct {
                        unsigned timing_20_2e_03:2;
index e292271a84e40384b4fd92d4a25c2f2fec025e9d..e007a9d44683fb40b3922e797d6302aabd18a9a2 100644 (file)
@@ -30,5 +30,6 @@ extern struct nouveau_oclass *nva3_devinit_oclass;
 extern struct nouveau_oclass *nvaf_devinit_oclass;
 extern struct nouveau_oclass *nvc0_devinit_oclass;
 extern struct nouveau_oclass *gm107_devinit_oclass;
+extern struct nouveau_oclass *gm204_devinit_oclass;
 
 #endif
index 1b937c2c25ae2e43a6594c203ad989294028a57c..d94ccacb40bf1ae60081e9458f6ffdaf14d36bb9 100644 (file)
@@ -8,6 +8,8 @@
 #include <subdev/bios/i2c.h>
 
 #define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_AUX(n)     (0x10 + (n))
+#define NV_I2C_EXT(n)     (0x20 + (n))
 #define NV_I2C_DEFAULT(n) (0x80 + (n))
 
 #define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
@@ -89,6 +91,7 @@ extern struct nouveau_oclass *nv94_i2c_oclass;
 extern struct nouveau_oclass *nvd0_i2c_oclass;
 extern struct nouveau_oclass *gf117_i2c_oclass;
 extern struct nouveau_oclass *nve0_i2c_oclass;
+extern struct nouveau_oclass *gm204_i2c_oclass;
 
 static inline int
 nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
index bf3d1f6113331a72045c7f154eadb04c64ebc9f6..f2427bf5aeed70e14bff97d84612ae81c62fdd30 100644 (file)
@@ -48,6 +48,8 @@ void nouveau_memx_wait(struct nouveau_memx *,
                       u32 addr, u32 mask, u32 data, u32 nsec);
 void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
 void nouveau_memx_wait_vblank(struct nouveau_memx *);
+void nouveau_memx_train(struct nouveau_memx *);
+int  nouveau_memx_train_result(struct nouveau_pwr *, u32 *, int);
 void nouveau_memx_block(struct nouveau_memx *);
 void nouveau_memx_unblock(struct nouveau_memx *);
 
index 820b62ffd75b286e084fb915a22969e8ffdb5d15..67db5e58880d3df2ef503fbe9a5f14d6401da802 100644 (file)
@@ -52,6 +52,7 @@ int  _nouveau_volt_init(struct nouveau_object *);
 #define _nouveau_volt_fini _nouveau_subdev_fini
 
 extern struct nouveau_oclass nv40_volt_oclass;
+extern struct nouveau_oclass gk20a_volt_oclass;
 
 int nouveau_voltgpio_init(struct nouveau_volt *);
 int nouveau_voltgpio_get(struct nouveau_volt *);
index ccfa21d72ddc950e3a2f583f7e9194382e0a08ed..bdd05ee7ec72ea671a8413b6da15d0c33cd03dbd 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/power_supply.h>
 #include <linux/clk.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/unaligned.h>
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
new file mode 100644 (file)
index 0000000..28906b1
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
+
+u32
+nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       struct bit_entry bit_M;
+       u32 data = 0x00000000;
+
+       if (!bit_entry(bios, 'M', &bit_M)) {
+               if (bit_M.version == 2 && bit_M.length > 0x04)
+                       data = nv_ro16(bios, bit_M.offset + 0x03);
+               if (data) {
+                       *ver = nv_ro08(bios, data + 0x00);
+                       switch (*ver) {
+                       case 0x10:
+                               *hdr = nv_ro08(bios, data + 0x01);
+                               *len = nv_ro08(bios, data + 0x02);
+                               *cnt = nv_ro08(bios, data + 0x03);
+                               return data;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       return 0x00000000;
+}
+
+u32
+nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+              struct nvbios_M0203T *info)
+{
+       u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
+       memset(info, 0x00, sizeof(*info));
+       switch (!!data * *ver) {
+       case 0x10:
+               info->type    = nv_ro08(bios, data + 0x04);
+               info->pointer = nv_ro16(bios, data + 0x05);
+               break;
+       default:
+               break;
+       }
+       return data;
+}
+
+u32
+nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+       u8  cnt, len;
+       u32 data = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);
+       if (data && idx < cnt) {
+               data = data + *hdr + idx * len;
+               *hdr = len;
+               return data;
+       }
+       return 0x00000000;
+}
+
+u32
+nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+              struct nvbios_M0203E *info)
+{
+       u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
+       memset(info, 0x00, sizeof(*info));
+       switch (!!data * *ver) {
+       case 0x10:
+               info->type  = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
+               info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
+               info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+               return data;
+       default:
+               break;
+       }
+       return 0x00000000;
+}
+
+u32
+nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
+              struct nvbios_M0203E *info)
+{
+       struct nvbios_M0203T M0203T;
+       u8  cnt, len, idx = 0xff;
+       u32 data;
+
+       if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
+               nv_warn(bios, "M0203T not found\n");
+               return 0x00000000;
+       }
+
+       while ((data = nvbios_M0203Ep(bios, ++idx, ver, hdr, info))) {
+               switch (M0203T.type) {
+               case M0203T_TYPE_RAMCFG:
+                       if (info->strap != ramcfg)
+                               continue;
+                       return data;
+               default:
+                       nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+                       return 0x00000000;
+               }
+       }
+
+       return data;
+}
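
nvbios_M0203Em() above is the lookup callers will actually want: walk the
M0203E entries until one whose strap field matches the board's ramcfg
strap, warning if the table type isn't RAMCFG. Each entry packs type and
strap into its first byte, exactly as nvbios_M0203Ep() decodes them. A
standalone sketch of that byte layout, reusing the type codes from the
M0203.h hunk earlier in this diff and an invented entry byte:

#include <stdint.h>
#include <stdio.h>

/* Type codes from the M0203.h hunk earlier in this diff. */
#define M0203E_TYPE_DDR2  0x0
#define M0203E_TYPE_DDR3  0x1
#define M0203E_TYPE_GDDR3 0x2
#define M0203E_TYPE_GDDR5 0x3

int main(void)
{
	uint8_t byte0 = 0x31;	/* invented raw M0203E entry byte */
	uint8_t type  = (byte0 & 0x0f) >> 0;
	uint8_t strap = (byte0 & 0xf0) >> 4;

	printf("strap %u -> type 0x%x%s\n", (unsigned)strap, (unsigned)type,
	       type == M0203E_TYPE_DDR3 ? " (DDR3)" : "");
	return 0;
}
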
index d45704a2c2df5b6aa1b95eda83d990995955e4c5..7df3a273553db24a2d608d318d1421f7f26f443a 100644 (file)
@@ -31,6 +31,8 @@
 #include <subdev/bios/bmp.h>
 #include <subdev/bios/bit.h>
 
+#include "priv.h"
+
 u8
 nvbios_checksum(const u8 *data, int size)
 {
@@ -56,362 +58,21 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
        return 0;
 }
 
-#if defined(__powerpc__)
-static void
-nouveau_bios_shadow_of(struct nouveau_bios *bios)
+int
+nvbios_extend(struct nouveau_bios *bios, u32 length)
 {
-       struct pci_dev *pdev = nv_device(bios)->pdev;
-       struct device_node *dn;
-       const u32 *data;
-       int size;
-
-       dn = pci_device_to_OF_node(pdev);
-       if (!dn) {
-               nv_info(bios, "Unable to get the OF node\n");
-               return;
-       }
-
-       data = of_get_property(dn, "NVDA,BMP", &size);
-       if (data && size) {
-               bios->size = size;
-               bios->data = kmalloc(bios->size, GFP_KERNEL);
-               if (bios->data)
-                       memcpy(bios->data, data, size);
-       }
-}
-#endif
-
-static void
-nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
-{
-       struct nouveau_device *device = nv_device(bios);
-       u64 addr = 0;
-       u32 bar0 = 0;
-       int i;
-
-       if (device->card_type >= NV_50) {
-               if (device->card_type >= NV_C0 && device->card_type < GM100) {
-                       if (nv_rd32(bios, 0x022500) & 0x00000001)
-                               return;
-               } else
-               if (device->card_type >= GM100) {
-                       if (nv_rd32(bios, 0x021c04) & 0x00000001)
-                               return;
-               }
-
-               addr = nv_rd32(bios, 0x619f04);
-               if (!(addr & 0x00000008)) {
-                       nv_debug(bios, "... not enabled\n");
-                       return;
+       if (bios->size < length) {
+               u8 *prev = bios->data;
+               if (!(bios->data = kmalloc(length, GFP_KERNEL))) {
+                       bios->data = prev;
+                       return -ENOMEM;
                }
-               if ( (addr & 0x00000003) != 1) {
-                       nv_debug(bios, "... not in vram\n");
-                       return;
-               }
-
-               addr = (addr & 0xffffff00) << 8;
-               if (!addr) {
-                       addr  = (u64)nv_rd32(bios, 0x001700) << 16;
-                       addr += 0xf0000;
-               }
-
-               bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
-       }
-
-       /* bail if no rom signature */
-       if (nv_rd08(bios, 0x700000) != 0x55 ||
-           nv_rd08(bios, 0x700001) != 0xaa)
-               goto out;
-
-       bios->size = nv_rd08(bios, 0x700002) * 512;
-       if (!bios->size)
-               goto out;
-
-       bios->data = kmalloc(bios->size, GFP_KERNEL);
-       if (bios->data) {
-               for (i = 0; i < bios->size; i++)
-                       nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
-       }
-
-out:
-       if (device->card_type >= NV_50)
-               nv_wr32(bios, 0x001700, bar0);
-}
-
-static void
-nouveau_bios_shadow_prom(struct nouveau_bios *bios)
-{
-       struct nouveau_device *device = nv_device(bios);
-       u32 pcireg, access;
-       u16 pcir;
-       int i;
-
-       /* there is no prom on nv4x IGP's */
-       if (device->card_type == NV_40 && device->chipset >= 0x4c)
-               return;
-
-       /* enable access to rom */
-       if (device->card_type >= NV_50)
-               pcireg = 0x088050;
-       else
-               pcireg = 0x001850;
-       access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
-
-       /* WARNING: PROM accesses should always be 32-bits aligned. Other
-        * accesses work on most chipset but do not on Kepler chipsets
-        */
-
-       /* bail if no rom signature, with a workaround for a PROM reading
-        * issue on some chipsets.  the first read after a period of
-        * inactivity returns the wrong result, so retry the first header
-        * byte a few times before giving up as a workaround
-        */
-       i = 16;
-       do {
-               u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
-               if (data == 0xaa55)
-                       break;
-       } while (i--);
-
-       if (!i)
-               goto out;
-
-       /* read entire bios image to system memory */
-       bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
-       bios->size = bios->size * 512;
-       if (!bios->size)
-               goto out;
-
-       bios->data = kmalloc(bios->size, GFP_KERNEL);
-       if (!bios->data)
-               goto out;
-
-       for (i = 0; i < bios->size; i += 4)
-               ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
-
-       /* check the PCI record header */
-       pcir = nv_ro16(bios, 0x0018);
-       if (bios->data[pcir + 0] != 'P' ||
-           bios->data[pcir + 1] != 'C' ||
-           bios->data[pcir + 2] != 'I' ||
-           bios->data[pcir + 3] != 'R') {
-               bios->size = 0;
-               kfree(bios->data);
-       }
-
-out:
-       /* disable access to rom */
-       nv_wr32(bios, pcireg, access);
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev) {
-       return false;
-}
-
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
-       return -EINVAL;
-}
-#endif
-
-static void
-nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
-{
-       struct pci_dev *pdev = nv_device(bios)->pdev;
-       int ret, cnt, i;
-
-       if (!nouveau_acpi_rom_supported(pdev)) {
-               bios->data = NULL;
-               return;
-       }
-
-       bios->size = 0;
-       bios->data = kmalloc(4096, GFP_KERNEL);
-       if (bios->data) {
-               if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
-                       bios->size = bios->data[2] * 512;
-               kfree(bios->data);
+               memcpy(bios->data, prev, bios->size);
+               bios->size = length;
+               kfree(prev);
+               return 1;
        }
-
-       if (!bios->size)
-               return;
-
-       bios->data = kmalloc(bios->size, GFP_KERNEL);
-       if (bios->data) {
-               /* disobey the acpi spec - much faster on at least w530 ... */
-               ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
-               if (ret != bios->size ||
-                   nvbios_checksum(bios->data, bios->size)) {
-                       /* ... that didn't work, ok, i'll be good now */
-                       for (i = 0; i < bios->size; i += cnt) {
-                               cnt = min((bios->size - i), (u32)4096);
-                               ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-                               if (ret != cnt)
-                                       break;
-                       }
-               }
-       }
-}
-
-static void
-nouveau_bios_shadow_pci(struct nouveau_bios *bios)
-{
-       struct pci_dev *pdev = nv_device(bios)->pdev;
-       size_t size;
-
-       if (!pci_enable_rom(pdev)) {
-               void __iomem *rom = pci_map_rom(pdev, &size);
-               if (rom && size) {
-                       bios->data = kmalloc(size, GFP_KERNEL);
-                       if (bios->data) {
-                               memcpy_fromio(bios->data, rom, size);
-                               bios->size = size;
-                       }
-               }
-               if (rom)
-                       pci_unmap_rom(pdev, rom);
-
-               pci_disable_rom(pdev);
-       }
-}
-
-static void
-nouveau_bios_shadow_platform(struct nouveau_bios *bios)
-{
-       struct pci_dev *pdev = nv_device(bios)->pdev;
-       size_t size;
-
-       void __iomem *rom = pci_platform_rom(pdev, &size);
-       if (rom && size) {
-               bios->data = kmalloc(size, GFP_KERNEL);
-               if (bios->data) {
-                       memcpy_fromio(bios->data, rom, size);
-                       bios->size = size;
-               }
-       }
-}
-
-static int
-nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
-{
-       if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
-                       bios->data[1] != 0xAA) {
-               nv_info(bios, "... signature not found\n");
-               return 0;
-       }
-
-       if (nvbios_checksum(bios->data,
-                       min_t(u32, bios->data[2] * 512, bios->size))) {
-               nv_info(bios, "... checksum invalid\n");
-               /* if a ro image is somewhat bad, it's probably all rubbish */
-               return writeable ? 2 : 1;
-       }
-
-       nv_info(bios, "... appears to be valid\n");
-       return 3;
-}
-
-struct methods {
-       const char desc[16];
-       void (*shadow)(struct nouveau_bios *);
-       const bool rw;
-       int score;
-       u32 size;
-       u8 *data;
-};
-
-static int
-nouveau_bios_shadow(struct nouveau_bios *bios)
-{
-       struct methods shadow_methods[] = {
-#if defined(__powerpc__)
-               { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
-#endif
-               { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
-               { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
-               { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
-               { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
-               { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
-               {}
-       };
-       struct methods *mthd, *best;
-       const struct firmware *fw;
-       const char *optarg;
-       int optlen, ret;
-       char *source;
-
-       optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
-       source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
-       if (source) {
-               /* try to match one of the built-in methods */
-               mthd = shadow_methods;
-               do {
-                       if (strcasecmp(source, mthd->desc))
-                               continue;
-                       nv_info(bios, "source: %s\n", mthd->desc);
-
-                       mthd->shadow(bios);
-                       mthd->score = nouveau_bios_score(bios, mthd->rw);
-                       if (mthd->score) {
-                               kfree(source);
-                               return 0;
-                       }
-               } while ((++mthd)->shadow);
-
-               /* attempt to load firmware image */
-               ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
-               if (ret == 0) {
-                       bios->size = fw->size;
-                       bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
-                       release_firmware(fw);
-
-                       nv_info(bios, "image: %s\n", source);
-                       if (nouveau_bios_score(bios, 1)) {
-                               kfree(source);
-                               return 0;
-                       }
-
-                       kfree(bios->data);
-                       bios->data = NULL;
-               }
-
-               nv_error(bios, "source \'%s\' invalid\n", source);
-               kfree(source);
-       }
-
-       mthd = shadow_methods;
-       do {
-               nv_info(bios, "checking %s for image...\n", mthd->desc);
-               mthd->shadow(bios);
-               mthd->score = nouveau_bios_score(bios, mthd->rw);
-               mthd->size = bios->size;
-               mthd->data = bios->data;
-               bios->data = NULL;
-       } while (mthd->score != 3 && (++mthd)->shadow);
-
-       mthd = shadow_methods;
-       best = mthd;
-       do {
-               if (mthd->score > best->score) {
-                       kfree(best->data);
-                       best = mthd;
-               }
-       } while ((++mthd)->shadow);
-
-       if (best->score) {
-               nv_info(bios, "using image from %s\n", best->desc);
-               bios->size = best->size;
-               bios->data = best->data;
-               return 0;
-       }
-
-       nv_error(bios, "unable to locate usable image\n");
-       return -EINVAL;
+       return 0;
 }
 
 static u8
@@ -472,7 +133,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_bios_shadow(bios);
+       ret = nvbios_shadow(bios);
        if (ret)
                return ret;
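
With the shadowing machinery deleted here, the ctor calls nvbios_shadow()
(declared via the newly included priv.h, presumably implemented elsewhere
in this series), and the file keeps only nvbios_extend(), a grow-on-demand
helper for the shadow buffer: 0 when the buffer is already large enough,
1 after a successful grow, -ENOMEM on failure with the old buffer left in
place. A userspace restatement of that contract, with struct shadow
standing in for struct nouveau_bios:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct shadow {
	unsigned char *data;
	unsigned int size;
};

/* Mirrors nvbios_extend(): 0 = already big enough, 1 = grown,
 * -ENOMEM = allocation failed (original buffer kept). */
static int shadow_extend(struct shadow *s, unsigned int length)
{
	unsigned char *prev = s->data;

	if (s->size >= length)
		return 0;
	s->data = malloc(length);
	if (!s->data) {
		s->data = prev;
		return -ENOMEM;
	}
	if (s->size)
		memcpy(s->data, prev, s->size);
	s->size = length;
	free(prev);
	return 1;
}

int main(void)
{
	struct shadow s = { NULL, 0 };
	return shadow_extend(&s, 4096) == 1 ? 0 : 1;
}

Keeping the old buffer on allocation failure lets a caller carry on with
the smaller shadow instead of losing it.
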
 
index bd8d348385b38fe1a3231586bdea941be9657558..96099aff8b41a0067a4fb72731e530676f4b8852 100644 (file)
@@ -42,7 +42,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
        *ver = nv_ro08(bios, dcb);
 
-       if (*ver >= 0x41) {
+       if (*ver >= 0x42) {
                nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
                return 0x0000;
        } else
@@ -157,17 +157,20 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
                                        break;
                                }
 
-                               switch (conf & 0x0f000000) {
-                               case 0x0f000000:
-                                       outp->dpconf.link_nr = 4;
-                                       break;
-                               case 0x03000000:
-                                       outp->dpconf.link_nr = 2;
-                                       break;
-                               case 0x01000000:
-                               default:
-                                       outp->dpconf.link_nr = 1;
-                                       break;
+                               outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+                               if (*ver < 0x41) {
+                                       switch (outp->dpconf.link_nr) {
+                                       case 0x0f:
+                                               outp->dpconf.link_nr = 4;
+                                               break;
+                                       case 0x03:
+                                               outp->dpconf.link_nr = 2;
+                                               break;
+                                       case 0x01:
+                                       default:
+                                               outp->dpconf.link_nr = 1;
+                                               break;
+                                       }
                                }
 
                                /* fall-through... */
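
The rework above is what buys DCB 4.1 support here: 4.1+ tables store the
DP lane count directly in bits 27:24 of the config word, while older
tables store a lane mask that still needs translating, which is why the
old switch survives behind the version check. A standalone restatement of
the new logic:

#include <stdint.h>
#include <stdio.h>

static int dp_link_nr(uint32_t conf, uint8_t dcb_ver)
{
	int nr = (conf & 0x0f000000) >> 24;

	if (dcb_ver < 0x41) {
		/* Pre-4.1 DCBs store a lane mask, not a count. */
		switch (nr) {
		case 0x0f: return 4;
		case 0x03: return 2;
		case 0x01:
		default:   return 1;
		}
	}
	return nr;	/* 4.1+ stores the lane count directly */
}

int main(void)
{
	printf("dcb 4.0, conf 0x0f000000 -> %d lanes\n",
	       dp_link_nr(0x0f000000, 0x40));
	printf("dcb 4.1, conf 0x04000000 -> %d lanes\n",
	       dp_link_nr(0x04000000, 0x41));
	return 0;
}
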
index 7f16e52d9bea2d45a6471e1e97300ae78a4b5cf7..51f3555996945b885d583dfcb34ff3dc0b305642 100644 (file)
@@ -40,6 +40,7 @@ nvbios_disp_table(struct nouveau_bios *bios,
                                switch (*ver) {
                                case 0x20:
                                case 0x21:
+                               case 0x22:
                                        *hdr = nv_ro08(bios, data + 0x01);
                                        *len = nv_ro08(bios, data + 0x02);
                                        *cnt = nv_ro08(bios, data + 0x03);
index f309dd657250f3031af58bcc8a8c56c2569eef13..cef53f81f12bfebcf62ecafda2b469c27592ce14 100644 (file)
@@ -41,6 +41,7 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                                case 0x21:
                                case 0x30:
                                case 0x40:
+                               case 0x41:
                                        *hdr = nv_ro08(bios, data + 0x01);
                                        *len = nv_ro08(bios, data + 0x02);
                                        *cnt = nv_ro08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
                        *cnt = nv_ro08(bios, outp + 0x04);
                        break;
                case 0x40:
+               case 0x41:
                        *hdr = nv_ro08(bios, data + 0x04);
                        *cnt = 0;
                        *len = 0;
@@ -108,6 +110,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
                                info->script[4] = nv_ro16(bios, data + 0x10);
                        break;
                case 0x40:
+               case 0x41:
                        info->flags     = nv_ro08(bios, data + 0x04);
                        info->script[0] = nv_ro16(bios, data + 0x05);
                        info->script[1] = nv_ro16(bios, data + 0x07);
@@ -172,10 +175,11 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
                        break;
                case 0x30:
                case 0x40:
+               case 0x41:
                        info->pc    = nv_ro08(bios, data + 0x00);
                        info->dc    = nv_ro08(bios, data + 0x01);
                        info->pe    = nv_ro08(bios, data + 0x02);
-                       info->tx_pu = nv_ro08(bios, data + 0x03);
+                       info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
                        break;
                default:
                        data = 0x0000;
@@ -194,6 +198,10 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
        u16 data;
 
        if (*ver >= 0x30) {
+               /*XXX: there's a second set of these on at least 4.1, which
+                *     I've witnessed NVIDIA using instead of the first
+                *     on GM204.  Figure out what/why.
+                */
                const u8 vsoff[] = { 0, 4, 7, 9 };
                idx = (pc * 10) + vsoff[vs] + pe;
        } else {
index b2a676e53580ca6cd143e238b211cbcd7b5a027a..49285d4f7ca50a5c1ca224a48b8e27a8c431c2eb 100644 (file)
@@ -90,7 +90,7 @@ nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
        u16 entry;
 
        i = 0;
-       while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
+       while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
                extdev_parse_entry(bios, entry, func);
                if (func->type == type)
                        return 0;
index cfb9288c6d28cbd2f2ea662bf77176c386ad14d9..282320ba9264155914600c397e34bbef6682db4b 100644 (file)
@@ -39,6 +39,11 @@ dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                        i2c = nv_ro16(bios, dcb + 4);
        }
 
+       if (i2c && *ver >= 0x42) {
+               nv_warn(bios, "ccb %02x not supported\n", *ver);
+               return 0x0000;
+       }
+
        if (i2c && *ver >= 0x30) {
                *ver = nv_ro08(bios, i2c + 0);
                *hdr = nv_ro08(bios, i2c + 1);
@@ -70,14 +75,25 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
        u8  ver, len;
        u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
        if (ent) {
-               info->type  = nv_ro08(bios, ent + 3);
-               info->share = DCB_I2C_UNUSED;
-               if (ver < 0x30) {
-                       info->type &= 0x07;
+               if (ver >= 0x41) {
+                       if (!(nv_ro32(bios, ent) & 0x80000000))
+                               info->type = DCB_I2C_UNUSED;
+                       else
+                               info->type = DCB_I2C_PMGR;
+               } else
+               if (ver >= 0x30) {
+                       info->type = nv_ro08(bios, ent + 0x03);
+               } else {
+                       info->type = nv_ro08(bios, ent + 0x03) & 0x07;
                        if (info->type == 0x07)
                                info->type = DCB_I2C_UNUSED;
                }
 
+               info->drive = DCB_I2C_UNUSED;
+               info->sense = DCB_I2C_UNUSED;
+               info->share = DCB_I2C_UNUSED;
+               info->auxch = DCB_I2C_UNUSED;
+
                switch (info->type) {
                case DCB_I2C_NV04_BIT:
                        info->drive = nv_ro08(bios, ent + 0);
@@ -87,12 +103,23 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
                        info->drive = nv_ro08(bios, ent + 1);
                        return 0;
                case DCB_I2C_NVIO_BIT:
-               case DCB_I2C_NVIO_AUX:
                        info->drive = nv_ro08(bios, ent + 0) & 0x0f;
-                       if (nv_ro08(bios, ent + 1) & 0x01) {
-                               info->share  = nv_ro08(bios, ent + 1) >> 1;
-                               info->share &= 0x0f;
-                       }
+                       if (nv_ro08(bios, ent + 1) & 0x01)
+                               info->share = nv_ro08(bios, ent + 1) >> 1;
+                       return 0;
+               case DCB_I2C_NVIO_AUX:
+                       info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
+                       if (nv_ro08(bios, ent + 1) & 0x01)
+                               info->share = info->auxch;
+                       return 0;
+               case DCB_I2C_PMGR:
+                       info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+                       if (info->drive == 0x1f)
+                               info->drive = DCB_I2C_UNUSED;
+                       info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+                       if (info->auxch == 0x1f)
+                               info->auxch = DCB_I2C_UNUSED;
+                       info->share = info->auxch;
                        return 0;
                case DCB_I2C_UNUSED:
                        return 0;
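
For ccb 4.1 boards the parse above changes shape entirely: bit 31 of the
entry gates validity, the pad (drive) index sits in bits 4:0, the AUX
channel in bits 9:5, and 0x1f in either field means unused. A standalone
sketch of that unpacking (the entry word is invented):

#include <stdint.h>
#include <stdio.h>

#define CCB41_FIELD_UNUSED 0x1f	/* reported upward as DCB_I2C_UNUSED */

int main(void)
{
	uint16_t ent = 0x03e2;	/* invented ccb 4.1 entry word */
	uint8_t drive = (ent & 0x01f) >> 0;
	uint8_t auxch = (ent & 0x3e0) >> 5;

	printf("drive 0x%02x%s\n", (unsigned)drive,
	       drive == CCB41_FIELD_UNUSED ? " (unused)" : "");
	printf("auxch 0x%02x%s\n", (unsigned)auxch,
	       auxch == CCB41_FIELD_UNUSED ? " (unused)" : "");
	return 0;
}
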
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
new file mode 100644 (file)
index 0000000..373f9a5
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pcir.h>
+#include <subdev/bios/npde.h>
+
+static bool
+nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
+{
+       struct nvbios_pcirT pcir;
+       struct nvbios_npdeT npde;
+       u8  ver;
+       u16 hdr;
+       u32 data;
+
+       switch ((data = nv_ro16(bios, image->base + 0x00))) {
+       case 0xaa55:
+       case 0xbb77:
+       case 0x4e56: /* NV */
+               break;
+       default:
+               nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
+                        image->base, data);
+               return false;
+       }
+
+       if (!(data = nvbios_pcirTp(bios, image->base, &ver, &hdr, &pcir)))
+               return false;
+       image->size = pcir.image_size;
+       image->type = pcir.image_type;
+       image->last = pcir.last;
+
+       if (image->type != 0x70) {
+               if (!(data = nvbios_npdeTp(bios, image->base, &npde)))
+                       return true;
+               image->size = npde.image_size;
+               image->last = npde.last;
+       } else {
+               image->last = true;
+       }
+
+       return true;
+}
+
+bool
+nvbios_image(struct nouveau_bios *bios, int idx, struct nvbios_image *image)
+{
+       memset(image, 0x00, sizeof(*image));
+       do {
+               image->base += image->size;
+               if (image->last || !nvbios_imagen(bios, image))
+                       return false;
+       } while (idx--);
+       return true;
+}
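
nvbios_image() above presents the BIOS as a chain of images: each image's
size comes from its PCIR header, or from a trailing NPDE for non-type-0x70
images, the next image starts immediately after, and the walk fails once
an image flagged 'last' has been consumed. A standalone model of that walk
over an invented layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct image { uint32_t size; bool last; };

int main(void)
{
	/* Invented chain: sizes as PCIR/NPDE would report them. */
	const struct image rom[] = {
		{ 0x10000, false },
		{ 0x08000, false },
		{ 0x04000, true  },
	};
	uint32_t base = 0;

	for (int idx = 0; idx < 3; idx++) {
		printf("image %d at 0x%05x, size 0x%05x\n", idx,
		       (unsigned)base, (unsigned)rom[idx].size);
		if (rom[idx].last)
			break;
		base += rom[idx].size;	/* images are packed back to back */
	}
	return 0;
}
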
index 626380f9e4c0192e877bef46b4a154b68c972b39..c6579ef32cd11380da2a4e2fb63117d652f6ade2 100644 (file)
@@ -255,6 +255,8 @@ init_i2c(struct nvbios_init *init, int index)
                }
 
                index = init->outp->i2c_index;
+               if (init->outp->type == DCB_OUTPUT_DP)
+                       index += NV_I2C_AUX(0);
        }
 
        return i2c->find(i2c, index);
@@ -278,7 +280,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
        return -ENODEV;
 }
 
-static int
+static u8
 init_rdauxr(struct nvbios_init *init, u32 addr)
 {
        struct nouveau_i2c_port *port = init_i2c(init, -2);
@@ -286,20 +288,24 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
 
        if (port && init_exec(init)) {
                int ret = nv_rdaux(port, addr, &data, 1);
-               if (ret)
-                       return ret;
-               return data;
+               if (ret == 0)
+                       return data;
+               trace("auxch read failed with %d\n", ret);
        }
 
-       return -ENODEV;
+       return 0x00;
 }
 
 static int
 init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
 {
        struct nouveau_i2c_port *port = init_i2c(init, -2);
-       if (port && init_exec(init))
-               return nv_wraux(port, addr, &data, 1);
+       if (port && init_exec(init)) {
+               int ret = nv_wraux(port, addr, &data, 1);
+               if (ret)
+                       trace("auxch write failed with %d\n", ret);
+               return ret;
+       }
        return -ENODEV;
 }
 
@@ -837,6 +843,40 @@ init_io_or(struct nvbios_init *init)
        init_wrvgai(init, 0x03d4, index, data | (1 << or));
 }
 
+/**
+ * INIT_ANDN_REG - opcode 0x47
+ *
+ */
+static void
+init_andn_reg(struct nvbios_init *init)
+{
+       struct nouveau_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u32 mask = nv_ro32(bios, init->offset + 5);
+
+       trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
+       init->offset += 9;
+
+       init_mask(init, reg, mask, 0);
+}
+
+/**
+ * INIT_OR_REG - opcode 0x48
+ *
+ */
+static void
+init_or_reg(struct nvbios_init *init)
+{
+       struct nouveau_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u32 mask = nv_ro32(bios, init->offset + 5);
+
+       trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
+       init->offset += 9;
+
+       init_mask(init, reg, 0, mask);
+}
+
 /**
  * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
  *
@@ -2068,6 +2108,8 @@ static struct nvbios_init_opcode {
        [0x3a] = { init_dp_condition },
        [0x3b] = { init_io_mask_or },
        [0x3c] = { init_io_or },
+       [0x47] = { init_andn_reg },
+       [0x48] = { init_or_reg },
        [0x49] = { init_idx_addr_latched },
        [0x4a] = { init_io_restrict_pll2 },
        [0x4b] = { init_pll2 },
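
The two opcodes wired into the table above are thin wrappers around the
same read-modify-write primitive: INIT_ANDN_REG (0x47) calls
init_mask(reg, mask, 0) to clear bits, INIT_OR_REG (0x48) calls
init_mask(reg, 0, mask) to set them, each consuming a 9-byte instruction
(opcode plus 32-bit register and mask). A standalone restatement of the
two operations on an invented register value:

#include <stdint.h>
#include <stdio.h>

/* Same shape as init_mask(): clear 'clr', then set 'set'. */
static uint32_t rmw(uint32_t val, uint32_t clr, uint32_t set)
{
	return (val & ~clr) | set;
}

int main(void)
{
	uint32_t reg = 0x0000f00f;	/* invented register contents */

	/* INIT_ANDN_REG (0x47): R[addr] &= ~mask */
	printf("andn 0x000000ff -> 0x%08x\n", (unsigned)rmw(reg, 0x000000ff, 0));
	/* INIT_OR_REG (0x48):   R[addr] |=  mask */
	printf("or   0x00ff0000 -> 0x%08x\n", (unsigned)rmw(reg, 0, 0x00ff0000));
	return 0;
}
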
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
new file mode 100644 (file)
index 0000000..d694716
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/npde.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
+{
+       struct nvbios_pcirT pcir;
+       u8  ver; u16 hdr;
+       u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
+       if ((data = (data + hdr + 0x0f) & ~0x0f)) {
+               switch (nv_ro32(bios, data + 0x00)) {
+               case 0x4544504e: /* NPDE */
+                       break;
+               default:
+                       nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
+                                data, nv_ro32(bios, data + 0x00));
+                       data = 0;
+                       break;
+               }
+       }
+       return data;
+}
+
+u32
+nvbios_npdeTp(struct nouveau_bios *bios, u32 base, struct nvbios_npdeT *info)
+{
+       u32 data = nvbios_npdeTe(bios, base);
+       memset(info, 0x00, sizeof(*info));
+       if (data) {
+               info->image_size = nv_ro16(bios, data + 0x08) * 512;
+               info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+       }
+       return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
new file mode 100644 (file)
index 0000000..91dae26
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
+{
+       u32 data = nv_ro16(bios, base + 0x18);
+       if (data) {
+               data += base;
+               switch (nv_ro32(bios, data + 0x00)) {
+               case 0x52494350: /* PCIR */
+               case 0x53494752: /* RGIS */
+               case 0x5344504e: /* NPDS */
+                       *hdr = nv_ro16(bios, data + 0x0a);
+                       *ver = nv_ro08(bios, data + 0x0c);
+                       break;
+               default:
+                       nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
+                                data, nv_ro32(bios, data + 0x00));
+                       data = 0;
+                       break;
+               }
+       }
+       return data;
+}
+
+u32
+nvbios_pcirTp(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr,
+             struct nvbios_pcirT *info)
+{
+       u32 data = nvbios_pcirTe(bios, base, ver, hdr);
+       memset(info, 0x00, sizeof(*info));
+       if (data) {
+               info->vendor_id = nv_ro16(bios, data + 0x04);
+               info->device_id = nv_ro16(bios, data + 0x06);
+               info->class_code[0] = nv_ro08(bios, data + 0x0d);
+               info->class_code[1] = nv_ro08(bios, data + 0x0e);
+               info->class_code[2] = nv_ro08(bios, data + 0x0f);
+               info->image_size = nv_ro16(bios, data + 0x10) * 512;
+               info->image_rev = nv_ro16(bios, data + 0x12);
+               info->image_type = nv_ro08(bios, data + 0x14);
+               info->last = nv_ro08(bios, data + 0x15) & 0x80;
+       }
+       return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
new file mode 100644 (file)
index 0000000..66c56ba
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pmu.h>
+
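+/* PMU table pointers appear to be expressed relative to the end of the
+ * base VBIOS image; resolve one into an absolute offset within the first
+ * type 0xe0 (extension) image, returning 0 if no such image exists
+ */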
+static u32
+weirdo_pointer(struct nouveau_bios *bios, u32 data)
+{
+       struct nvbios_image image;
+       int idx = 0;
+       if (nvbios_image(bios, idx++, &image)) {
+               data -= image.size;
+               while (nvbios_image(bios, idx++, &image)) {
+                       if (image.type == 0xe0)
+                               return image.base + data;
+               }
+       }
+       return 0;
+}
+
+u32
+nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       struct bit_entry bit_p;
+       u32 data = 0;
+
+       if (!bit_entry(bios, 'p', &bit_p)) {
+               if (bit_p.version == 2 && bit_p.length >= 4)
+                       data = nv_ro32(bios, bit_p.offset + 0x00);
+               if ((data = weirdo_pointer(bios, data))) {
+                       *ver = nv_ro08(bios, data + 0x00); /* maybe? */
+                       *hdr = nv_ro08(bios, data + 0x01);
+                       *len = nv_ro08(bios, data + 0x02);
+                       *cnt = nv_ro08(bios, data + 0x03);
+               }
+       }
+
+       return data;
+}
+
+u32
+nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+            struct nvbios_pmuT *info)
+{
+       u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
+       memset(info, 0x00, sizeof(*info));
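+       /* "!!data * *ver" collapses to zero when no table was found, so
+        * any future version-specific parsing only runs on valid data
+        */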
+       switch (!!data * *ver) {
+       default:
+               break;
+       }
+       return data;
+}
+
+u32
+nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+       u8  cnt, len;
+       u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
+       if (data && idx < cnt) {
+               data = data + *hdr + (idx * len);
+               *hdr = len;
+               return data;
+       }
+       return 0;
+}
+
+u32
+nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+            struct nvbios_pmuE *info)
+{
+       u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
+       memset(info, 0x00, sizeof(*info));
+       switch (!!data * *ver) {
+       default:
+               info->type = nv_ro08(bios, data + 0x00);
+               info->data = nv_ro32(bios, data + 0x02);
+               break;
+       }
+       return data;
+}
+
+bool
+nvbios_pmuRm(struct nouveau_bios *bios, u8 type, struct nvbios_pmuR *info)
+{
+       struct nvbios_pmuE pmuE;
+       u8  ver, hdr, idx = 0;
+       u32 data;
+       memset(info, 0x00, sizeof(*info));
+       while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
+               if ( pmuE.type == type &&
+                   (data = weirdo_pointer(bios, pmuE.data))) {
+                       info->init_addr_pmu = nv_ro32(bios, data + 0x08);
+                       info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+                       info->boot_addr     = data + 0x30;
+                       info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
+                                             nv_ro32(bios, data + 0x18);
+                       info->boot_size     = nv_ro32(bios, data + 0x1c) -
+                                             nv_ro32(bios, data + 0x18);
+                       info->code_addr     = info->boot_addr + info->boot_size;
+                       info->code_addr_pmu = info->boot_addr_pmu +
+                                             info->boot_size;
+                       info->code_size     = nv_ro32(bios, data + 0x20);
+                       info->data_addr     = data + 0x30 +
+                                             nv_ro32(bios, data + 0x24);
+                       info->data_addr_pmu = nv_ro32(bios, data + 0x28);
+                       info->data_size     = nv_ro32(bios, data + 0x2c);
+                       return true;
+               }
+       }
+       return false;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
new file mode 100644 (file)
index 0000000..187d225
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __NVKM_BIOS_PRIV_H__
+#define __NVKM_BIOS_PRIV_H__
+
+#include <subdev/bios.h>
+
+struct nvbios_source {
+       const char *name;
+       void *(*init)(struct nouveau_bios *, const char *);
+       void  (*fini)(void *);
+       u32   (*read)(void *, u32 offset, u32 length, struct nouveau_bios *);
+       bool rw;
+};
+
+int nvbios_extend(struct nouveau_bios *, u32 length);
+int nvbios_shadow(struct nouveau_bios *);
+
+extern const struct nvbios_source nvbios_rom;
+extern const struct nvbios_source nvbios_ramin;
+extern const struct nvbios_source nvbios_acpi_fast;
+extern const struct nvbios_source nvbios_acpi_slow;
+extern const struct nvbios_source nvbios_pcirom;
+extern const struct nvbios_source nvbios_platform;
+extern const struct nvbios_source nvbios_of;
+
+#endif
index 6c401f70ab99ae380b6c614983a997a9244a97d9..1623c8dfe797362042efddb17bb1e827e13cd6b7 100644 (file)
@@ -25,6 +25,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/ramcfg.h>
+#include <subdev/bios/M0203.h>
 
 static u8
 nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
@@ -54,12 +55,22 @@ nvbios_ramcfg_index(struct nouveau_subdev *subdev)
        u8 strap = nvbios_ramcfg_strap(subdev);
        u32 xlat = 0x00000000;
        struct bit_entry bit_M;
+       struct nvbios_M0203E M0203E;
+       u8 ver, hdr;
 
        if (!bit_entry(bios, 'M', &bit_M)) {
                if (bit_M.version == 1 && bit_M.length >= 5)
                        xlat = nv_ro16(bios, bit_M.offset + 3);
-               if (bit_M.version == 2 && bit_M.length >= 3)
+               if (bit_M.version == 2 && bit_M.length >= 3) {
+                       /*XXX: is M ever shorter than this?
+                        *     if not - what is xlat used for now?
+                        *     also - sigh..
+                        */
+                       if (bit_M.length >= 7 &&
+                           nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
+                               return M0203E.group;
                        xlat = nv_ro16(bios, bit_M.offset + 1);
+               }
        }
 
        if (xlat)
index 585e69331ccce91ac3bc694e7fc09942a06ba993..c5685228c3228c6c23c8d707ade0fb4e024cddf8 100644 (file)
@@ -162,8 +162,9 @@ nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
                p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
                p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
                p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
-               p->ramcfg_10_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+               p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
                p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+               p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
                p->ramcfg_10_05    = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
                p->ramcfg_10_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
                p->ramcfg_10_07    = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
new file mode 100644 (file)
index 0000000..bb9e001
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "priv.h"
+#include <core/option.h>
+#include <subdev/bios/image.h>
+
+struct shadow {
+       struct nouveau_oclass base;
+       u32 skip;
+       const struct nvbios_source *func;
+       void *data;
+       u32 size;
+       int score;
+};
+
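+/* extend the shadow copy until it covers at least 'upto' bytes, reading
+ * the missing span from the underlying source
+ */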
+static bool
+shadow_fetch(struct nouveau_bios *bios, u32 upto)
+{
+       struct shadow *mthd = (void *)nv_object(bios)->oclass;
+       const u32 limit = (upto + 3) & ~3;
+       const u32 start = bios->size;
+       void *data = mthd->data;
+       if (nvbios_extend(bios, limit) > 0) {
+               u32 read = mthd->func->read(data, start, limit - start, bios);
+               bios->size = start + read;
+       }
+       return bios->size >= limit;
+}
+
+static u8
+shadow_rd08(struct nouveau_object *object, u64 addr)
+{
+       struct nouveau_bios *bios = (void *)object;
+       if (shadow_fetch(bios, addr + 1))
+               return bios->data[addr];
+       return 0x00;
+}
+
+static u16
+shadow_rd16(struct nouveau_object *object, u64 addr)
+{
+       struct nouveau_bios *bios = (void *)object;
+       if (shadow_fetch(bios, addr + 2))
+               return get_unaligned_le16(&bios->data[addr]);
+       return 0x0000;
+}
+
+static u32
+shadow_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nouveau_bios *bios = (void *)object;
+       if (shadow_fetch(bios, addr + 4))
+               return get_unaligned_le32(&bios->data[addr]);
+       return 0x00000000;
+}
+
+static struct nouveau_oclass
+shadow_class = {
+       .handle = NV_SUBDEV(VBIOS, 0x00),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .rd08 = shadow_rd08,
+               .rd16 = shadow_rd16,
+               .rd32 = shadow_rd32,
+       },
+};
+
+static int
+shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
+{
+       struct nvbios_image image;
+       int score = 1;
+
+       if (!nvbios_image(bios, idx, &image)) {
+               nv_debug(bios, "image %d invalid\n", idx);
+               return 0;
+       }
+       nv_debug(bios, "%08x: type %02x, %d bytes\n",
+                image.base, image.type, image.size);
+
+       if (!shadow_fetch(bios, image.size)) {
+               nv_debug(bios, "%08x: fetch failed\n", image.base);
+               return 0;
+       }
+
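+       /* score the image: 1 for being fetchable at all, +3 for a valid
+        * checksum (or for a non-BIOS image type); a failed checksum adds
+        * only 1, or 2 when the source is writable
+        */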
+       switch (image.type) {
+       case 0x00:
+               if (nvbios_checksum(&bios->data[image.base], image.size)) {
+                       nv_debug(bios, "%08x: checksum failed\n", image.base);
+                       if (mthd->func->rw)
+                               score += 1;
+                       score += 1;
+               } else {
+                       score += 3;
+               }
+               break;
+       default:
+               score += 3;
+               break;
+       }
+
+       if (!image.last)
+               score += shadow_image(bios, idx + 1, mthd);
+       return score;
+}
+
+static int
+shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
+{
+       struct nouveau_oclass *oclass = nv_object(bios)->oclass;
+       int score;
+       nv_object(bios)->oclass = &mthd->base;
+       score = shadow_image(bios, 0, mthd);
+       nv_object(bios)->oclass = oclass;
+       return score;
+}
+
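+/* shadow the BIOS via one source and score the result; the fetched image
+ * is stashed in mthd->data/size and detached from the bios object so the
+ * next method starts from scratch
+ */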
+static int
+shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
+{
+       const struct nvbios_source *func = mthd->func;
+       if (func->name) {
+               nv_debug(bios, "trying %s...\n", name ? name : func->name);
+               if (func->init) {
+                       mthd->data = func->init(bios, name);
+                       if (IS_ERR(mthd->data)) {
+                               mthd->data = NULL;
+                               return 0;
+                       }
+               }
+               mthd->score = shadow_score(bios, mthd);
+               if (func->fini)
+                       func->fini(mthd->data);
+               nv_debug(bios, "scored %d\n", mthd->score);
+               mthd->data = bios->data;
+               mthd->size = bios->size;
+               bios->data  = NULL;
+               bios->size  = 0;
+       }
+       return mthd->score;
+}
+
+static u32
+shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       const struct firmware *fw = data;
+       if (offset + length <= fw->size) {
+               memcpy(bios->data + offset, fw->data + offset, length);
+               return length;
+       }
+       return 0;
+}
+
+static void *
+shadow_fw_init(struct nouveau_bios *bios, const char *name)
+{
+       struct device *dev = &nv_device(bios)->pdev->dev;
+       const struct firmware *fw;
+       int ret = request_firmware(&fw, name, dev);
+       if (ret)
+               return ERR_PTR(-ENOENT);
+       return (void *)fw;
+}
+
+static const struct nvbios_source
+shadow_fw = {
+       .name = "firmware",
+       .init = shadow_fw_init,
+       .fini = (void(*)(void *))release_firmware,
+       .read = shadow_fw_read,
+       .rw = false,
+};
+
+int
+nvbios_shadow(struct nouveau_bios *bios)
+{
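+       /* sources are listed in order of preference; a non-zero 'skip'
+        * makes that method run only while the best score so far is
+        * below it
+        */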
+       struct shadow mthds[] = {
+               { shadow_class, 0, &nvbios_of },
+               { shadow_class, 0, &nvbios_ramin },
+               { shadow_class, 0, &nvbios_rom },
+               { shadow_class, 0, &nvbios_acpi_fast },
+               { shadow_class, 4, &nvbios_acpi_slow },
+               { shadow_class, 1, &nvbios_pcirom },
+               { shadow_class, 1, &nvbios_platform },
+               { shadow_class }
+       }, *mthd = mthds, *best = NULL;
+       const char *optarg;
+       char *source;
+       int optlen;
+
+       /* handle user-specified bios source */
+       optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+       source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
+       if (source) {
+               /* try to match one of the built-in methods */
+               for (mthd = mthds; mthd->func; mthd++) {
+                       if (mthd->func->name &&
+                           !strcasecmp(source, mthd->func->name)) {
+                               best = mthd;
+                               if (shadow_method(bios, mthd, NULL))
+                                       break;
+                       }
+               }
+
+               /* otherwise, attempt to load as firmware */
+               if (!best && (best = mthd)) {
+                       mthd->func = &shadow_fw;
+                       shadow_method(bios, mthd, source);
+                       mthd->func = NULL;
+               }
+
+               if (!best->score) {
+                       nv_error(bios, "%s invalid\n", source);
+                       kfree(source);
+                       source = NULL;
+               }
+       }
+
+       /* scan all potential bios sources, looking for best image */
+       if (!best || !best->score) {
+               for (mthd = mthds, best = mthd; mthd->func; mthd++) {
+                       if (!mthd->skip || best->score < mthd->skip) {
+                               if (shadow_method(bios, mthd, NULL)) {
+                                       if (mthd->score > best->score)
+                                               best = mthd;
+                               }
+                       }
+               }
+       }
+
+       /* cleanup the ones we didn't use */
+       for (mthd = mthds; mthd->func; mthd++) {
+               if (mthd != best)
+                       kfree(mthd->data);
+       }
+
+       if (!best->score) {
+               nv_fatal(bios, "unable to locate usable image\n");
+               return -EINVAL;
+       }
+
+       nv_info(bios, "using image from %s\n", best->func ?
+               best->func->name : source);
+       bios->data = best->data;
+       bios->size = best->size;
+       kfree(source);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
new file mode 100644 (file)
index 0000000..bc130c1
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+#else
+static inline bool
+nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+       return false;
+}
+
+static inline int
+nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+       return -EINVAL;
+}
+#endif
+
+/* This version of the shadow function disobeys the ACPI spec and tries
+ * to fetch in units of more than 4KiB at a time.  This is a LOT faster
+ * on some systems, such as the Lenovo W530.
+ */
+static u32
+acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       u32 limit = (offset + length + 0xfff) & ~0xfff;
+       u32 start = offset & ~0x00000fff;
+       u32 fetch = limit - start;
+
+       if (nvbios_extend(bios, limit) > 0) {
+               int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+               if (ret == fetch)
+                       return fetch;
+       }
+
+       return 0;
+}
+
+/* Other systems, such as the one in fdo#55948, will report success
+ * but only return 4KiB of data.  The common bios fetching logic will
+ * detect an invalid image, and fall back to this version of the read
+ * function.
+ */
+static u32
+acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       u32 limit = (offset + length + 0xfff) & ~0xfff;
+       u32 start = offset & ~0xfff;
+       u32 fetch = 0;
+
+       if (nvbios_extend(bios, limit) > 0) {
+               while (start + fetch < limit) {
+                       int ret = nouveau_acpi_get_bios_chunk(bios->data,
+                                                             start + fetch,
+                                                             0x1000);
+                       if (ret != 0x1000)
+                               break;
+                       fetch += 0x1000;
+               }
+       }
+
+       return fetch;
+}
+
+static void *
+acpi_init(struct nouveau_bios *bios, const char *name)
+{
+       if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+               return ERR_PTR(-ENODEV);
+       return NULL;
+}
+
+const struct nvbios_source
+nvbios_acpi_fast = {
+       .name = "ACPI",
+       .init = acpi_init,
+       .read = acpi_read_fast,
+       .rw = false,
+};
+
+const struct nvbios_source
+nvbios_acpi_slow = {
+       .name = "ACPI",
+       .init = acpi_init,
+       .read = acpi_read_slow,
+       .rw = false,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
new file mode 100644 (file)
index 0000000..3abe487
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(__powerpc__)
+struct priv {
+       const void __iomem *data;
+       int size;
+};
+
+static u32
+of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       struct priv *priv = data;
+       if (offset + length <= priv->size) {
+               memcpy_fromio(bios->data + offset, priv->data + offset, length);
+               return length;
+       }
+       return 0;
+}
+
+static void *
+of_init(struct nouveau_bios *bios, const char *name)
+{
+       struct pci_dev *pdev = nv_device(bios)->pdev;
+       struct device_node *dn;
+       struct priv *priv;
+       if (!(dn = pci_device_to_OF_node(pdev)))
+               return ERR_PTR(-ENODEV);
+       if (!(priv = kzalloc(sizeof(*priv), GFP_KERNEL)))
+               return ERR_PTR(-ENOMEM);
+       if ((priv->data = of_get_property(dn, "NVDA,BMP", &priv->size)))
+               return priv;
+       kfree(priv);
+       return ERR_PTR(-EINVAL);
+}
+
+const struct nvbios_source
+nvbios_of = {
+       .name = "OpenFirmware",
+       .init = of_init,
+       .fini = (void(*)(void *))kfree,
+       .read = of_read,
+       .rw = false,
+};
+#else
+const struct nvbios_source
+nvbios_of = {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
new file mode 100644 (file)
index 0000000..1d0389c
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+       struct pci_dev *pdev;
+       void __iomem *rom;
+       size_t size;
+};
+
+static u32
+pcirom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       struct priv *priv = data;
+       if (offset + length <= priv->size) {
+               memcpy_fromio(bios->data + offset, priv->rom + offset, length);
+               return length;
+       }
+       return 0;
+}
+
+static void
+pcirom_fini(void *data)
+{
+       struct priv *priv = data;
+       pci_unmap_rom(priv->pdev, priv->rom);
+       pci_disable_rom(priv->pdev);
+       kfree(priv);
+}
+
+static void *
+pcirom_init(struct nouveau_bios *bios, const char *name)
+{
+       struct pci_dev *pdev = nv_device(bios)->pdev;
+       struct priv *priv = NULL;
+       int ret;
+
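+       /* each comma expression below pre-loads 'ret' with the error code
+        * to return should the test that follows it fail
+        */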
+       if (!(ret = pci_enable_rom(pdev))) {
+               if (ret = -ENOMEM,
+                   (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+                       if (ret = -EFAULT,
+                           (priv->rom = pci_map_rom(pdev, &priv->size))) {
+                               priv->pdev = pdev;
+                               return priv;
+                       }
+                       kfree(priv);
+               }
+               pci_disable_rom(pdev);
+       }
+
+       return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_pcirom = {
+       .name = "PCIROM",
+       .init = pcirom_init,
+       .fini = pcirom_fini,
+       .read = pcirom_read,
+       .rw = true,
+};
+
+static void *
+platform_init(struct nouveau_bios *bios, const char *name)
+{
+       struct pci_dev *pdev = nv_device(bios)->pdev;
+       struct priv *priv;
+       int ret = -ENOMEM;
+
+       if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+               if (ret = -ENODEV,
+                   (priv->rom = pci_platform_rom(pdev, &priv->size)))
+                       return priv;
+               kfree(priv);
+       }
+
+       return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_platform = {
+       .name = "PLATFORM",
+       .init = platform_init,
+       .fini = (void(*)(void *))kfree,
+       .read = pcirom_read,
+       .rw = true,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
new file mode 100644 (file)
index 0000000..5e58bba
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+       struct nouveau_bios *bios;
+       u32 bar0;
+};
+
+static u32
+pramin_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       u32 i;
+       if (offset + length <= 0x00100000) {
+               for (i = offset; i < offset + length; i += 4)
+                       *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+               return length;
+       }
+       return 0;
+}
+
+static void
+pramin_fini(void *data)
+{
+       struct priv *priv = data;
+       nv_wr32(priv->bios, 0x001700, priv->bar0);
+       kfree(priv);
+}
+
+static void *
+pramin_init(struct nouveau_bios *bios, const char *name)
+{
+       struct priv *priv = NULL;
+       u64 addr = 0;
+
+       /* PRAMIN always potentially available prior to nv50 */
+       if (nv_device(bios)->card_type < NV_50)
+               return NULL;
+
+       /* we can't get the bios image pointer without PDISP */
+       if (nv_device(bios)->card_type >= GM100)
+               addr = nv_rd32(bios, 0x021c04);
+       else
+       if (nv_device(bios)->card_type >= NV_C0)
+               addr = nv_rd32(bios, 0x022500);
+       if (addr & 0x00000001) {
+               nv_debug(bios, "... display disabled\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       /* check that the window is enabled and in vram, particularly
+        * important as we don't want to be touching vram on an
+        * uninitialised board
+        */
+       addr = nv_rd32(bios, 0x619f04);
+       if (!(addr & 0x00000008)) {
+               nv_debug(bios, "... not enabled\n");
+               return ERR_PTR(-ENODEV);
+       }
+       if ( (addr & 0x00000003) != 1) {
+               nv_debug(bios, "... not in vram\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       /* some alternate method inherited from xf86-video-nv... */
+       addr = (addr & 0xffffff00) << 8;
+       if (!addr) {
+               addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+               addr += 0xf0000;
+       }
+
+       /* modify bar0 PRAMIN window to cover the bios image */
+       if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+               nv_error(bios, "... out of memory\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       priv->bios = bios;
+       priv->bar0 = nv_rd32(bios, 0x001700);
+       nv_wr32(bios, 0x001700, addr >> 16);
+       return priv;
+}
+
+const struct nvbios_source
+nvbios_ramin = {
+       .name = "PRAMIN",
+       .init = pramin_init,
+       .fini = pramin_fini,
+       .read = pramin_read,
+       .rw = true,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
new file mode 100644 (file)
index 0000000..b7992bc
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+static u32
+prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+       u32 i;
+       if (offset + length <= 0x00100000) {
+               for (i = offset; i < offset + length; i += 4)
+                       *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+               return length;
+       }
+       return 0;
+}
+
+static void
+prom_fini(void *data)
+{
+       struct nouveau_bios *bios = data;
+       if (nv_device(bios)->card_type < NV_50)
+               nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
+       else
+               nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+}
+
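+/* bit 0 of 0x001850 (0x088050 on nv50 and up) appears to gate access to
+ * the PROM aperture: clear it for the duration of the shadow, restore it
+ * in prom_fini
+ */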
+static void *
+prom_init(struct nouveau_bios *bios, const char *name)
+{
+       if (nv_device(bios)->card_type < NV_50) {
+               if (nv_device(bios)->card_type == NV_40 &&
+                   nv_device(bios)->chipset >= 0x4c)
+                       return ERR_PTR(-ENODEV);
+               nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
+       } else {
+               nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
+       }
+       return bios;
+}
+
+const struct nvbios_source
+nvbios_rom = {
+       .name = "PROM",
+       .init = prom_init,
+       .fini = prom_fini,
+       .read = prom_read,
+       .rw = false,
+};
index 46d955eb51eba6f956b238d0134fb2043fec7bd4..8521eca1ed9c576657ba5619d51e24f876e8e52b 100644 (file)
@@ -93,10 +93,44 @@ nvbios_timingEp(struct nouveau_bios *bios, int idx,
        p->timing_hdr = *hdr;
        switch (!!data * *ver) {
        case 0x10:
-               p->timing_10_WR = nv_ro08(bios, data + 0x00);
-               p->timing_10_CL = nv_ro08(bios, data + 0x02);
-               p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
-               p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+               p->timing_10_WR    = nv_ro08(bios, data + 0x00);
+               p->timing_10_WTR   = nv_ro08(bios, data + 0x01);
+               p->timing_10_CL    = nv_ro08(bios, data + 0x02);
+               p->timing_10_RC    = nv_ro08(bios, data + 0x03);
+               p->timing_10_RFC   = nv_ro08(bios, data + 0x05);
+               p->timing_10_RAS   = nv_ro08(bios, data + 0x07);
+               p->timing_10_RP    = nv_ro08(bios, data + 0x09);
+               p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
+               p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
+               p->timing_10_RRD   = nv_ro08(bios, data + 0x0c);
+               p->timing_10_13    = nv_ro08(bios, data + 0x0d);
+               p->timing_10_ODT   = nv_ro08(bios, data + 0x0e) & 0x07;
+
+               p->timing_10_24  = 0xff;
+               p->timing_10_21  = 0;
+               p->timing_10_20  = 0;
+               p->timing_10_CWL = 0;
+               p->timing_10_18  = 0;
+               p->timing_10_16  = 0;
+
+               switch (min_t(u8, *hdr, 25)) {
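+               /* deliberate fallthrough: each larger header revision adds
+                * fields on top of those defined by the smaller ones
+                */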
+               case 25:
+                       p->timing_10_24  = nv_ro08(bios, data + 0x18);
+               case 24:
+               case 23:
+               case 22:
+                       p->timing_10_21  = nv_ro08(bios, data + 0x15);
+               case 21:
+                       p->timing_10_20  = nv_ro08(bios, data + 0x14);
+               case 20:
+                       p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+               case 19:
+                       p->timing_10_18  = nv_ro08(bios, data + 0x12);
+               case 18:
+               case 17:
+                       p->timing_10_16  = nv_ro08(bios, data + 0x10);
+               }
+
                break;
        case 0x20:
                p->timing[0] = nv_ro32(bios, data + 0x00);
index 425a8d5e9129fa26cb728a342bd3302f9397082c..fb4fad374bdd8cb4ffbb54dbe0979ef983e1dc52 100644 (file)
@@ -109,7 +109,7 @@ struct gk20a_clk_pllg_params {
 };
 
 static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
-       .min_vco = 1000, .max_vco = 1700,
+       .min_vco = 1000, .max_vco = 2064,
        .min_u = 12, .max_u = 38,
        .min_m = 1, .max_m = 255,
        .min_n = 8, .max_n = 255,
@@ -470,76 +470,91 @@ gk20a_pstates[] = {
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 72000,
+                       .voltage = 0,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 108000,
+                       .voltage = 1,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 180000,
+                       .voltage = 2,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 252000,
+                       .voltage = 3,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 324000,
+                       .voltage = 4,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 396000,
+                       .voltage = 5,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 468000,
+                       .voltage = 6,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 540000,
+                       .voltage = 7,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 612000,
+                       .voltage = 8,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 648000,
+                       .voltage = 9,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 684000,
+                       .voltage = 10,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 708000,
+                       .voltage = 11,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 756000,
+                       .voltage = 12,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 804000,
+                       .voltage = 13,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 852000,
+                       .voltage = 14,
                },
        },
 };
index 094551d8ad9b44619f9950c30c6553d1f0a5b730..07ad0124767562a484b6ef3746fca714bfaaf516 100644 (file)
@@ -510,7 +510,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        int ret;
 
        ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
-                                  false, &priv);
+                                  true, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
index 239acfe876c3ddb4262d2e696cd4a59e1e06d951..0e45cee82463af6553be67389282cbf74290b009 100644 (file)
@@ -24,8 +24,6 @@
 
 #include <core/option.h>
 
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
 #include <subdev/vga.h>
 
 #include "priv.h"
@@ -56,7 +54,7 @@ _nouveau_devinit_init(struct nouveau_object *object)
        if (ret)
                return ret;
 
-       ret = nvbios_init(&devinit->base, devinit->post);
+       ret = impl->post(&devinit->base, devinit->post);
        if (ret)
                return ret;
 
index c69bc7f54e371cf7ca016ec929082d6ecd3f3c5e..4ba43d6a1ec8189dae09b6aa6455a5fb6e580229 100644 (file)
@@ -24,7 +24,7 @@
 
 #include "nv50.h"
 
-static u64
+u64
 gm107_devinit_disable(struct nouveau_devinit *devinit)
 {
        struct nv50_devinit_priv *priv = (void *)devinit;
@@ -53,4 +53,5 @@ gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nvc0_devinit_pll_set,
        .disable = gm107_devinit_disable,
+       .post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
new file mode 100644 (file)
index 0000000..e44a866
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pmu.h>
+
+#include "nv50.h"
+
+static void
+pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       int i;
+
+       nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+       for (i = 0; i < len; i += 4) {
+               if ((i & 0xff) == 0)
+                       nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
+               nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+       }
+
+       while (i & 0xff) {
+               nv_wr32(priv, 0x10a184, 0x00000000);
+               i += 4;
+       }
+}
+
+static void
+pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       int i;
+
+       nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+       for (i = 0; i < len; i += 4)
+               nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+}
+
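+/* dereference a pointer held in PMU DMEM: aim the DMEM window at 'argp',
+ * read the pointer stored there, then read back the word at that pointer
+ * plus 'argi' (0x10a1c0/0x10a1c4 being the falcon DMEM index/data pair)
+ */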
+static u32
+pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+{
+       nv_wr32(priv, 0x10a1c0, argp);
+       nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
+       return nv_rd32(priv, 0x10a1c4);
+}
+
+static void
+pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+{
+       nv_wr32(priv, 0x10a104, init_addr);
+       nv_wr32(priv, 0x10a10c, 0x00000000);
+       nv_wr32(priv, 0x10a100, 0x00000002);
+}
+
+static int
+pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+        u32 *init_addr_pmu, u32 *args_addr_pmu)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_pmuR pmu;
+
+       if (!nvbios_pmuRm(bios, type, &pmu)) {
+               nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+               return -EINVAL;
+       }
+
+       if (!post)
+               return 0;
+
+       pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+       pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+       pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+
+       if (init_addr_pmu) {
+               *init_addr_pmu = pmu.init_addr_pmu;
+               *args_addr_pmu = pmu.args_addr_pmu;
+               return 0;
+       }
+
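+       /* caller didn't ask for a handoff of the entry points; kick the
+        * PMU now
+        */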
+       return pmu_exec(priv, pmu.init_addr_pmu), 0;
+}
+
+static int
+gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
+{
+       struct nv50_devinit_priv *priv = (void *)nouveau_devinit(subdev);
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct bit_entry bit_I;
+       u32 init, args;
+       int ret;
+
+       if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
+                                           bit_I.length < 0x1c) {
+               nv_error(priv, "VBIOS PMU init data not found\n");
+               return -EINVAL;
+       }
+
+       /* reset PMU and load init table parser ucode */
+       if (post) {
+               nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
+               nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
+               nv_rd32(priv, 0x000200);
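+               /* flush the posted writes, then wait for the falcon's
+                * IMEM/DMEM scrubbing (likely what 0x10a10c bits 1:2
+                * indicate) to complete
+                */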
+               while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+               }
+       }
+
+       ret = pmu_load(priv, 0x04, post, &init, &args);
+       if (ret)
+               return ret;
+
+       /* upload first chunk of init data */
+       if (post) {
+               u32 pmu = pmu_args(priv, args + 0x08, 0x08);
+               u32 img = nv_ro16(bios, bit_I.offset + 0x14);
+               u32 len = nv_ro16(bios, bit_I.offset + 0x16);
+               pmu_data(priv, pmu, img, len);
+       }
+
+       /* upload second chunk of init data */
+       if (post) {
+               u32 pmu = pmu_args(priv, args + 0x08, 0x10);
+               u32 img = nv_ro16(bios, bit_I.offset + 0x18);
+               u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
+               pmu_data(priv, pmu, img, len);
+       }
+
+       /* execute init tables */
+       if (post) {
+               nv_wr32(priv, 0x10a040, 0x00005000);
+               pmu_exec(priv, init);
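+               /* bit 13 of 0x10a040 is presumably set by the ucode once
+                * the init tables have been processed
+                */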
+               while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+               }
+       }
+
+       /* load and execute some other ucode image (bios therm?) */
+       return pmu_load(priv, 0x01, post, NULL, NULL);
+}
+
+struct nouveau_oclass *
+gm204_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x07),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_devinit_ctor,
+               .dtor = _nouveau_devinit_dtor,
+               .init = nv50_devinit_init,
+               .fini = _nouveau_devinit_fini,
+       },
+       .pll_set = nvc0_devinit_pll_set,
+       .disable = gm107_devinit_disable,
+       .post = gm204_devinit_post,
+}.base;
index 052ad690b468da81ec1a95e04eace07834f70c88..65651c50f6ea3ddce07e754bcb2f681ff2af8256 100644 (file)
@@ -464,4 +464,5 @@ nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .meminit = nv04_devinit_meminit,
        .pll_set = nv04_devinit_pll_set,
+       .post = nvbios_init,
 }.base;
index 4a19c10e517809ab7b348803481bcb0c0e98a0eb..a2007a3efc4d48d803ca1150fa44741a8b7085cd 100644 (file)
@@ -136,4 +136,5 @@ nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .meminit = nv05_devinit_meminit,
        .pll_set = nv04_devinit_pll_set,
+       .post = nvbios_init,
 }.base;
index 3b8d657da2798445f620e9662467df8696304280..178b46f79b502920287a5710d311a9f83d722f92 100644 (file)
@@ -107,4 +107,5 @@ nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .meminit = nv10_devinit_meminit,
        .pll_set = nv04_devinit_pll_set,
+       .post = nvbios_init,
 }.base;
index 526d0c6faacd3d2623f1dae3aa2cd1b92087a00e..995dd97af3e923a074dc541603c0d39a03b825f3 100644 (file)
@@ -34,4 +34,5 @@ nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
                .fini = nv04_devinit_fini,
        },
        .pll_set = nv04_devinit_pll_set,
+       .post = nvbios_init,
 }.base;
index 04bc9732644ccd706c72fdc055d534e61272f625..915089fb46f7965bae615d81b1f3176c306128a0 100644 (file)
@@ -71,4 +71,5 @@ nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .meminit = nv20_devinit_meminit,
        .pll_set = nv04_devinit_pll_set,
+       .post = nvbios_init,
 }.base;
index b46c62a1d5d86a9a44a0db88932dacbca6a10598..968334d1dca4a5ac9cbfd9b486488d0467d82256 100644 (file)
@@ -26,6 +26,7 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
+#include <subdev/ibus.h>
 #include <subdev/vga.h>
 
 #include "nv50.h"
@@ -91,6 +92,7 @@ int
 nv50_devinit_init(struct nouveau_object *object)
 {
        struct nouveau_bios *bios = nouveau_bios(object);
+       struct nouveau_ibus *ibus = nouveau_ibus(object);
        struct nv50_devinit_priv *priv = (void *)object;
        struct nvbios_outp info;
        struct dcb_output outp;
@@ -105,6 +107,13 @@ nv50_devinit_init(struct nouveau_object *object)
                }
        }
 
+       /* some boards appear to require certain priv register timeouts
+        * to be bumped before running devinit scripts.  not a clue why
+        * the vbios engineers didn't make the scripts just work...
+        */
+       if (priv->base.post && ibus)
+               nv_ofuncs(ibus)->init(nv_object(ibus));
+
        ret = nouveau_devinit_init(&priv->base);
        if (ret)
                return ret;
@@ -160,4 +169,5 @@ nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nv50_devinit_pll_set,
        .disable = nv50_devinit_disable,
+       .post = nvbios_init,
 }.base;
index 51d5076333ecefc8ba5167a3a2ccbedbd12a9b4a..f412bb7f780e06020a95100fe2b4bdd07306fdbf 100644 (file)
@@ -18,4 +18,6 @@ int  nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
 
 int  nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
 
+u64  gm107_devinit_disable(struct nouveau_devinit *);
+
 #endif
index 787422505d87b934b9f26af17bdab9542896fd47..a7c80ded77cdce591f65c035e8cd69f88db7dc8a 100644 (file)
@@ -60,4 +60,5 @@ nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nv50_devinit_pll_set,
        .disable = nv84_devinit_disable,
+       .post = nvbios_init,
 }.base;
index 2b0e963fc6f01358dc2d4c921af0b2ec3e845c43..a773253a17f6041a0b709274cf322933d947268c 100644 (file)
@@ -59,4 +59,5 @@ nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nv50_devinit_pll_set,
        .disable = nv98_devinit_disable,
+       .post = nvbios_init,
 }.base;
index 006cf348bda767e8219901ec63c70aadd7d89365..b9cd9e53f760dfaab68806c4bf9b61598acaf452 100644 (file)
@@ -142,4 +142,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
        .pll_set = nva3_devinit_pll_set,
        .disable = nva3_devinit_disable,
        .mmio    = nva3_devinit_mmio,
+       .post = nvbios_init,
 }.base;
index 4fc68d27eff39e2f460f44dd63f89f9218eb3330..3729846a8e5c980e8354c8214d9c87eedad079c3 100644 (file)
@@ -60,4 +60,5 @@ nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nva3_devinit_pll_set,
        .disable = nvaf_devinit_disable,
+       .post = nvbios_init,
 }.base;
index 30c765747eeaebb2c9a1526b31306587d64e4a5e..80bd7f5eda3d0def565711f8c8ba9db54f265bd4 100644 (file)
@@ -115,4 +115,5 @@ nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
        },
        .pll_set = nvc0_devinit_pll_set,
        .disable = nvc0_devinit_disable,
+       .post = nvbios_init,
 }.base;
index f0e8683ad8405639d0406c3dc7be0dad153395d5..cbcd51852472aa54ab0667bb0b9f3bac03cd2cac 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
 #include <subdev/clock/pll.h>
 #include <subdev/devinit.h>
 
@@ -12,6 +13,7 @@ struct nouveau_devinit_impl {
        int  (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
        u64  (*disable)(struct nouveau_devinit *);
        u32  (*mmio)(struct nouveau_devinit *, u32);
+       int  (*post)(struct nouveau_subdev *, bool);
 };
 
 #define nouveau_devinit_create(p,e,o,d)                                        \
index f009d8a39d9d70f4063ee8df300ba399682c093f..c866148c440fee88d063736545f38cc35fb6bec7 100644 (file)
  */
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
 
 #include "priv.h"
 
 int
 nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 {
-       struct bit_entry M;
-       u8 ramcfg;
-
-       ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
-       if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
-               u16 table   = nv_ro16(bios, M.offset + 3);
-               u8  version = nv_ro08(bios, table + 0);
-               u8  header  = nv_ro08(bios, table + 1);
-               u8  record  = nv_ro08(bios, table + 2);
-               u8  entries = nv_ro08(bios, table + 3);
-               if (table && version == 0x10 && ramcfg < entries) {
-                       u16 entry = table + header + (ramcfg * record);
-                       switch (nv_ro08(bios, entry) & 0x0f) {
-                       case 0: return NV_MEM_TYPE_DDR2;
-                       case 1: return NV_MEM_TYPE_DDR3;
-                       case 2: return NV_MEM_TYPE_GDDR3;
-                       case 3: return NV_MEM_TYPE_GDDR5;
-                       default:
-                               break;
-                       }
-
+       const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+       struct nvbios_M0203E M0203E;
+       u8 ver, hdr;
+
+       if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
+               switch (M0203E.type) {
+               case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
+               case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
+               case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
+               case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+               default:
+                       nv_warn(bios, "M0203E type %02x\n", M0203E.type);
+                       return NV_MEM_TYPE_UNKNOWN;
                }
        }
 
+       nv_warn(bios, "M0203E not matched!\n");
        return NV_MEM_TYPE_UNKNOWN;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
new file mode 100644 (file)
index 0000000..d85a25d
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ *         Roy Spliet <rspliet@eclipso.eu>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+       int id;
+       u8 enc;
+};
+
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+       while (xlat->id >= 0) {
+               if (xlat->id == id)
+                       return xlat->enc;
+               xlat++;
+       }
+       return -EINVAL;
+}
+
+static const struct ramxlat
+ramgddr3_cl_lo[] = {
+       { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+       /* the below are mentioned in some, but not all, gddr3 docs */
+       { 12, 4 }, { 13, 5 }, { 14, 6 },
+       /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
+       /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
+        * { 15, 11 }, */
+       { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_cl_hi[] = {
+       { 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 },
+       { 16, 0 }, { 17, 1 },
+       { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_wr_lo[] = {
+       { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
+       { 11, 0 },
+       /* the below are mentioned in some, but not all, gddr3 docs */
+       { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13, 2 },
+       { -1 }
+};
+
+int
+nouveau_gddr3_calc(struct nouveau_ram *ram)
+{
+       int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+
+       switch (ram->next->bios.timing_ver) {
+       case 0x10:
+               CWL = ram->next->bios.timing_10_CWL;
+               CL  = ram->next->bios.timing_10_CL;
+               WR  = ram->next->bios.timing_10_WR;
+               DLL = !ram->next->bios.ramcfg_10_DLLoff;
+               ODT = ram->next->bios.timing_10_ODT;
+               break;
+       case 0x20:
+               CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+               CL  = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
+               WR  = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+               /* XXX: Get these values from the VBIOS instead */
+               DLL = !(ram->mr[1] & 0x1);
+               ODT =  (ram->mr[1] & 0x004) >> 2 |
+                      (ram->mr[1] & 0x040) >> 5 |
+                      (ram->mr[1] & 0x200) >> 7;
+               break;
+       default:
+               return -ENOSYS;
+       }
+
+       hi = ram->mr[2] & 0x1;
+       CL  = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
+       WR  = ramxlat(ramgddr3_wr_lo, WR);
+       if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0)
+               return -EINVAL;
+
+       ram->mr[0] &= ~0xf74;
+       ram->mr[0] |= (CWL & 0x07) << 9;
+       ram->mr[0] |= (CL & 0x07) << 4;
+       ram->mr[0] |= (CL & 0x08) >> 1;
+
+       ram->mr[1] &= ~0x3fc;
+       ram->mr[1] |= (ODT & 0x03) << 2;
+       ram->mr[1] |= (ODT & 0x03) << 8;
+       ram->mr[1] |= (WR  & 0x03) << 4;
+       ram->mr[1] |= (WR  & 0x04) << 5;
+       ram->mr[1] |= !DLL << 6;
+       return 0;
+}
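ramxlat does a sentinel-terminated linear lookup from a JEDEC timing value to its GDDR3 mode-register encoding, and nouveau_gddr3_calc packs the encodings into the MR0/MR1 bitfields. A standalone user-space sketch of the MR0 packing, with invented CL/CWL sample values (not part of the patch):

	/* Standalone sketch: ramxlat lookup plus MR0 packing as above. */
	#include <stdio.h>

	struct xlat { int id; unsigned char enc; };

	static int
	xlat(const struct xlat *t, int id)
	{
		for (; t->id >= 0; t++)
			if (t->id == id)
				return t->enc;
		return -1;
	}

	int
	main(void)
	{
		static const struct xlat cl_lo[] = {
			{ 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { -1 }
		};
		int CL = xlat(cl_lo, 11);	/* JEDEC CL11 encodes as 3 */
		int CWL = 5;			/* invented sample value */
		unsigned int mr0 = 0;

		mr0 |= (CWL & 0x07) << 9;	/* CWL occupies bits 9-11 */
		mr0 |= (CL  & 0x07) << 4;	/* CL low bits occupy 4-6 */
		mr0 |= (CL  & 0x08) >> 1;	/* CL bit 3 lands in bit 2 */
		printf("MR0 = 0x%03x\n", mr0);	/* prints MR0 = 0xa30 */
		return 0;
	}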
index a16024a747717e881d0d4e1464a02b1f6cdac45a..fde42e4d1b56002b733bac6f7d37ca90d6d09328 100644 (file)
@@ -26,6 +26,20 @@ struct gk20a_fb_priv {
        struct nouveau_fb base;
 };
 
+static int
+gk20a_fb_init(struct nouveau_object *object)
+{
+       struct gk20a_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+       return 0;
+}
+
 static int
 gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
@@ -48,7 +62,7 @@ gk20a_fb_oclass = &(struct nouveau_fb_impl) {
        .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = gk20a_fb_ctor,
                .dtor = _nouveau_fb_dtor,
-               .init = _nouveau_fb_init,
+               .init = gk20a_fb_init,
                .fini = _nouveau_fb_fini,
        },
        .memtype = nvc0_fb_memtype_valid,
index 60322e906dd403c5b2d206c4590605f612d4ba0d..283863f7aa9bd58ba9403773b40cc28e77aab8c0 100644 (file)
@@ -37,6 +37,7 @@ extern struct nouveau_oclass gm107_ram_oclass;
 
 int nouveau_sddr2_calc(struct nouveau_ram *ram);
 int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr3_calc(struct nouveau_ram *ram);
 int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
 
 #define nouveau_fb_create(p,e,c,d)                                             \
index d1fbbe4b00a2a4ef1eeccef2f43b00cfa02c096d..0ac7256443bb0f2e1745052479e233087fb89bb8 100644 (file)
@@ -140,6 +140,20 @@ ramfuc_wait_vblank(struct ramfuc *ram)
        nouveau_memx_wait_vblank(ram->memx);
 }
 
+static inline void
+ramfuc_train(struct ramfuc *ram)
+{
+       nouveau_memx_train(ram->memx);
+}
+
+static inline int
+ramfuc_train_result(struct nouveau_fb *pfb, u32 *result, u32 rsize)
+{
+       struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+
+       return nouveau_memx_train_result(ppwr, result, rsize);
+}
+
 static inline void
 ramfuc_block(struct ramfuc *ram)
 {
@@ -162,6 +176,8 @@ ramfuc_unblock(struct ramfuc *ram)
 #define ram_wait(s,r,m,d,n)  ramfuc_wait(&(s)->base, (r), (m), (d), (n))
 #define ram_nsec(s,n)        ramfuc_nsec(&(s)->base, (n))
 #define ram_wait_vblank(s)   ramfuc_wait_vblank(&(s)->base)
+#define ram_train(s)         ramfuc_train(&(s)->base)
+#define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l))
 #define ram_block(s)         ramfuc_block(&(s)->base)
 #define ram_unblock(s)       ramfuc_unblock(&(s)->base)
 
index 3601deca0bd5d01dd81920fc789aa5a96e148950..3b38a538845d70fc9b714c6cda72740942b4dec6 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Ben Skeggs
+ *         Roy Spliet <rspliet@eclipso.eu>
  */
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/rammap.h>
+#include <subdev/bios/M0205.h>
 #include <subdev/bios/timing.h>
 
 #include <subdev/clock/nva3.h>
 #include <subdev/clock/pll.h>
 
+#include <subdev/gpio.h>
+
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
 #include <core/option.h>
 
 #include "ramfuc.h"
 
 #include "nv50.h"
 
+/* XXX: Remove when memx gains GPIO support */
+extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
+
 struct nva3_ramfuc {
        struct ramfuc base;
+       struct ramfuc_reg r_0x001610;
+       struct ramfuc_reg r_0x001700;
+       struct ramfuc_reg r_0x002504;
        struct ramfuc_reg r_0x004000;
        struct ramfuc_reg r_0x004004;
        struct ramfuc_reg r_0x004018;
        struct ramfuc_reg r_0x004128;
        struct ramfuc_reg r_0x004168;
+       struct ramfuc_reg r_0x100080;
        struct ramfuc_reg r_0x100200;
        struct ramfuc_reg r_0x100210;
        struct ramfuc_reg r_0x100220[9];
+       struct ramfuc_reg r_0x100264;
        struct ramfuc_reg r_0x1002d0;
        struct ramfuc_reg r_0x1002d4;
        struct ramfuc_reg r_0x1002dc;
        struct ramfuc_reg r_0x10053c;
        struct ramfuc_reg r_0x1005a0;
        struct ramfuc_reg r_0x1005a4;
+       struct ramfuc_reg r_0x100700;
        struct ramfuc_reg r_0x100714;
        struct ramfuc_reg r_0x100718;
        struct ramfuc_reg r_0x10071c;
+       struct ramfuc_reg r_0x100720;
        struct ramfuc_reg r_0x100760;
        struct ramfuc_reg r_0x1007a0;
        struct ramfuc_reg r_0x1007e0;
+       struct ramfuc_reg r_0x100da0;
        struct ramfuc_reg r_0x10f804;
        struct ramfuc_reg r_0x1110e0;
        struct ramfuc_reg r_0x111100;
        struct ramfuc_reg r_0x111104;
+       struct ramfuc_reg r_0x1111e0;
+       struct ramfuc_reg r_0x111400;
        struct ramfuc_reg r_0x611200;
        struct ramfuc_reg r_mr[4];
+       struct ramfuc_reg r_gpioFBVREF;
+};
+
+struct nva3_ltrain {
+       enum {
+               NVA3_TRAIN_UNKNOWN,
+               NVA3_TRAIN_UNSUPPORTED,
+               NVA3_TRAIN_ONCE,
+               NVA3_TRAIN_EXEC,
+               NVA3_TRAIN_DONE
+       } state;
+       u32 r_100720;
+       u32 r_1111e0;
+       u32 r_111400;
+       struct nouveau_mem *mem;
 };
 
 struct nva3_ram {
        struct nouveau_ram base;
        struct nva3_ramfuc fuc;
+       struct nva3_ltrain ltrain;
 };
 
+void
+nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
+{
+       int i, lo, hi;
+       u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
+
+       for (i = 0; i < 8; i++) {
+               for (lo = 0; lo < 0x40; lo++) {
+                       if (!(vals[lo] & 0x80000000))
+                               continue;
+                       if (vals[lo] & (0x101 << i))
+                               break;
+               }
+
+               if (lo == 0x40)
+                       return;
+
+               for (hi = lo + 1; hi < 0x40; hi++) {
+                       if (!(vals[hi] & 0x80000000))
+                               continue;
+                       if (!(vals[hi] & (0x101 << i))) {
+                               hi--;
+                               break;
+                       }
+               }
+
+               median[i] = ((hi - lo) >> 1) + lo;
+               bins[(median[i] & 0xf0) >> 4]++;
+               median[i] += 0x30;
+       }
+
+       /* Find the best value for 0x1111e0 */
+       for (i = 0; i < 4; i++) {
+               if (bins[i] > qty) {
+                       bin = i + 3;
+                       qty = bins[i];
+               }
+       }
+
+       train->r_100720 = 0;
+       for (i = 0; i < 8; i++) {
+               median[i] = max(median[i], (u8) (bin << 4));
+               median[i] = min(median[i], (u8) ((bin << 4) | 0xf));
+
+               train->r_100720 |= ((median[i] & 0x0f) << (i << 2));
+       }
+
+       train->r_1111e0 = 0x02000000 | (bin * 0x101);
+       train->r_111400 = 0x0;
+}
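For each of the eight byte lanes, nva3_link_train_calc locates the span of taps whose samples set that lane's failure bits (0x101 << i), takes the midpoint as the lane's median, then bins the medians in groups of 16 to vote on a shared 0x1111e0 setting. A standalone user-space sketch of the per-lane window search on fabricated samples (not part of the patch):

	/* Standalone sketch: one lane's window search, fabricated data.
	 * Bit 31 marks a valid sample; (0x101 << lane) marks a failure. */
	#include <stdio.h>

	int
	main(void)
	{
		unsigned int vals[0x40];
		int lane = 0, i, lo, hi;

		for (i = 0; i < 0x40; i++)
			vals[i] = 0x80000000;		/* all samples valid */
		for (i = 0x10; i < 0x20; i++)
			vals[i] |= 0x101 << lane;	/* lane 0 fails at taps 0x10-0x1f */

		for (lo = 0; lo < 0x40; lo++)
			if ((vals[lo] & 0x80000000) && (vals[lo] & (0x101 << lane)))
				break;
		for (hi = lo + 1; hi < 0x40; hi++)
			if (!(vals[hi] & (0x101 << lane)))
				break;
		hi--;

		printf("window 0x%x..0x%x median 0x%x\n",
		       lo, hi, ((hi - lo) >> 1) + lo);
		/* prints: window 0x10..0x1f median 0x17 */
		return 0;
	}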
+
+/*
+ * Link training for (at least) DDR3
+ */
+int
+nva3_link_train(struct nouveau_fb *pfb)
+{
+       struct nouveau_bios *bios = nouveau_bios(pfb);
+       struct nva3_ram *ram = (void *)pfb->ram;
+       struct nouveau_clock *clk = nouveau_clock(pfb);
+       struct nva3_ltrain *train = &ram->ltrain;
+       struct nouveau_device *device = nv_device(pfb);
+       struct nva3_ramfuc *fuc = &ram->fuc;
+       u32 *result, r1700;
+       int ret, i;
+       struct nvbios_M0205T M0205T = { 0 };
+       u8 ver, hdr, cnt, len, snr, ssz;
+       unsigned int clk_current;
+       unsigned long flags;
+       unsigned long *f = &flags;
+
+       if (!nouveau_boolopt(device->cfgopt, "NvMemExec", true))
+               return -ENOSYS;
+
+       /* XXX: Multiple partitions? */
+       result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+       if (!result)
+               return -ENOMEM;
+
+       train->state = NVA3_TRAIN_EXEC;
+
+       /* Clock speeds for training and back */
+       nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
+       if (M0205T.freq == 0) {
+               kfree(result);
+               return -ENOENT;
+       }
+
+       clk_current = clk->read(clk, nv_clk_src_mem);
+
+       ret = nva3_clock_pre(clk, f);
+       if (ret)
+               goto out;
+
+       /* First: clock up/down */
+       ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+       if (ret)
+               goto out;
+
+       /* Do this *after* calc, eliminates write in script */
+       nv_wr32(pfb, 0x111400, 0x00000000);
+       /* XXX: Magic writes that improve training reliability? */
+       nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
+       nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
+       nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
+       nv_wr32(pfb, 0x100c04, 0x00000400);
+
+       /* Now the training script */
+       r1700 = ram_rd32(fuc, 0x001700);
+
+       ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+       ram_wr32(fuc, 0x611200, 0x3300);
+       ram_wait_vblank(fuc);
+       ram_wait(fuc, 0x611200, 0x00000003, 0x00000000, 500000);
+       ram_mask(fuc, 0x001610, 0x00000083, 0x00000003);
+       ram_mask(fuc, 0x100080, 0x00000020, 0x00000000);
+       ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+       ram_wr32(fuc, 0x001700, 0x00000000);
+
+       ram_train(fuc);
+
+       /* Reset */
+       ram_mask(fuc, 0x10f804, 0x80000000, 0x80000000);
+       ram_wr32(fuc, 0x10053c, 0x0);
+       ram_wr32(fuc, 0x100720, train->r_100720);
+       ram_wr32(fuc, 0x1111e0, train->r_1111e0);
+       ram_wr32(fuc, 0x111400, train->r_111400);
+       ram_nuke(fuc, 0x100080);
+       ram_mask(fuc, 0x100080, 0x00000020, 0x00000020);
+       ram_nsec(fuc, 1000);
+
+       ram_wr32(fuc, 0x001700, r1700);
+       ram_mask(fuc, 0x001610, 0x00000083, 0x00000080);
+       ram_wr32(fuc, 0x611200, 0x3330);
+       ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+
+       ram_exec(fuc, true);
+
+       ram->base.calc(pfb, clk_current);
+       ram_exec(fuc, true);
+
+       /* Post-processing, avoids flicker */
+       nv_mask(pfb, 0x616308, 0x10, 0x10);
+       nv_mask(pfb, 0x616b08, 0x10, 0x10);
+
+       nva3_clock_post(clk, f);
+
+       ram_train_result(pfb, result, 64);
+       for (i = 0; i < 64; i++)
+               nv_debug(pfb, "Train: %08x", result[i]);
+       nva3_link_train_calc(result, train);
+
+       nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
+                       train->r_1111e0, train->r_111400);
+
+       kfree(result);
+
+       train->state = NVA3_TRAIN_DONE;
+
+       return ret;
+
+out:
+       if (ret == -EBUSY)
+               f = NULL;
+
+       train->state = NVA3_TRAIN_UNSUPPORTED;
+
+       nva3_clock_post(clk, f);
+       kfree(result);
+       return ret;
+}
+
+int
+nva3_link_train_init(struct nouveau_fb *pfb)
+{
+       static const u32 pattern[16] = {
+               0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+               0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+               0x33333333, 0x55555555, 0x77777777, 0x66666666,
+               0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+       };
+       struct nouveau_bios *bios = nouveau_bios(pfb);
+       struct nva3_ram *ram = (void *)pfb->ram;
+       struct nva3_ltrain *train = &ram->ltrain;
+       struct nouveau_mem *mem;
+       struct nvbios_M0205E M0205E;
+       u8 ver, hdr, cnt, len;
+       u32 r001700;
+       int ret, i = 0;
+
+       train->state = NVA3_TRAIN_UNSUPPORTED;
+
+       /* We only support type 5
+        * XXX: the training pattern table appears to be unused by this routine */
+       if (!nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E))
+               return -ENOENT;
+
+       if (M0205E.type != 5)
+               return 0;
+
+       train->state = NVA3_TRAIN_ONCE;
+
+       ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+       if (ret)
+               return ret;
+
+       mem = ram->ltrain.mem;
+
+       nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
+       nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+       nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+
+       for (i = 0; i < 0x30; i++) {
+               nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+               nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+       }
+
+       for (i = 0; i < 0x30; i++) {
+               nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+               nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+       }
+
+       /* And upload the pattern */
+       r001700 = nv_rd32(pfb, 0x1700);
+       nv_wr32(pfb, 0x1700, mem->offset >> 16);
+       for (i = 0; i < 16; i++)
+               nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+       for (i = 0; i < 16; i++)
+               nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
+       nv_wr32(pfb, 0x1700, r001700);
+
+       train->r_100720 = nv_rd32(pfb, 0x100720);
+       train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
+       train->r_111400 = nv_rd32(pfb, 0x111400);
+
+       return 0;
+}
+
+void
+nva3_link_train_fini(struct nouveau_fb *pfb)
+{
+       struct nva3_ram *ram = (void *)pfb->ram;
+
+       if (ram->ltrain.mem)
+               pfb->ram->put(pfb, &ram->ltrain.mem);
+}
+
+/*
+ * RAM reclocking
+ */
+#define T(t) cfg->timing_10_##t
+static int
+nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
+{
+       struct nva3_ram *ram = (void *)pfb->ram;
+       struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+       int tUNK_base, tUNK_40_0, prevCL;
+       u32 cur2, cur3, cur7, cur8;
+
+       cur2 = nv_rd32(pfb, 0x100228);
+       cur3 = nv_rd32(pfb, 0x10022c);
+       cur7 = nv_rd32(pfb, 0x10023c);
+       cur8 = nv_rd32(pfb, 0x100240);
+
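+       /* If the BIOS leaves CWL at zero, derive it from the RAM type */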
+       switch ((!T(CWL)) * ram->base.type) {
+       case NV_MEM_TYPE_DDR2:
+               T(CWL) = T(CL) - 1;
+               break;
+       case NV_MEM_TYPE_GDDR3:
+               T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+               break;
+       }
+
+       prevCL = (cur3 & 0x000000ff) + 1;
+       tUNK_base = ((cur7 & 0x00ff0000) >> 16) - prevCL;
+
+       timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+       timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+                   max_t(u8, T(18), 1) << 16 |
+                   (T(WTR) + 1 + T(CWL)) << 8 |
+                   (5 + T(CL) - T(CWL));
+       timing[2] = (T(CWL) - 1) << 24 |
+                   (T(RRD) << 16) |
+                   (T(RCDWR) << 8) |
+                   T(RCDRD);
+       timing[3] = (cur3 & 0x00ff0000) |
+                   (0x30 + T(CL)) << 24 |
+                   (0xb + T(CL)) << 8 |
+                   (T(CL) - 1);
+       timing[4] = T(20) << 24 |
+                   T(21) << 16 |
+                   T(13) << 8 |
+                   T(13);
+       timing[5] = T(RFC) << 24 |
+                   max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+                   max_t(u8, (T(CWL) + 6), (T(CL) + 2)) << 8 |
+                   T(RP);
+       timing[6] = (0x5a + T(CL)) << 16 |
+                   max_t(u8, 1, (6 - T(CL) + T(CWL))) << 8 |
+                   (0x50 + T(CL) - T(CWL));
+       timing[7] = (cur7 & 0xff000000) |
+                   ((tUNK_base + T(CL)) << 16) |
+                   0x202;
+       timing[8] = cur8 & 0xffffff00;
+
+       switch (ram->base.type) {
+       case NV_MEM_TYPE_DDR2:
+       case NV_MEM_TYPE_GDDR3:
+               tUNK_40_0 = prevCL - (cur8 & 0xff);
+               if (tUNK_40_0 > 0)
+                       timing[8] |= T(CL);
+               break;
+       default:
+               break;
+       }
+
+       nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
+                       timing[0], timing[1], timing[2], timing[3]);
+       nv_debug(pfb, "  230: %08x %08x %08x %08x\n",
+                       timing[4], timing[5], timing[6], timing[7]);
+       nv_debug(pfb, "  240: %08x\n", timing[8]);
+       return 0;
+}
+#undef T
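With T() expanding to the BIOS timing_10_* fields, timing[0] is simply four timings packed one byte each. A standalone arithmetic check with made-up values (not part of the patch):

	/* Standalone sketch: timing[0] packing with invented BIOS values
	 * tRP=11 tRAS=28 tRFC=104 tRC=39. */
	#include <stdio.h>

	int
	main(void)
	{
		unsigned int RP = 11, RAS = 28, RFC = 104, RC = 39;

		printf("timing[0] = 0x%08x\n", RP << 24 | RAS << 16 | RFC << 8 | RC);
		/* prints: timing[0] = 0x0b1c6827 */
		return 0;
	}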
+
+static void
+nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
+{
+       ram_mask(fuc, mr[0], 0x100, 0x100);
+       ram_nsec(fuc, 1000);
+       ram_mask(fuc, mr[0], 0x100, 0x000);
+       ram_nsec(fuc, 1000);
+}
+
+static void
+nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+       u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+       if (!(mr1_old & 0x1)) {
+               ram_wr32(fuc, 0x1002d4, 0x00000001);
+               ram_wr32(fuc, mr[1], mr[1]);
+               ram_nsec(fuc, 1000);
+       }
+}
+
+static void
+nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+       u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+       if (!(mr1_old & 0x40)) {
+               ram_wr32(fuc, mr[1], mr[1]);
+               ram_nsec(fuc, 1000);
+       }
+}
+
+static void
+nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
+{
+       ram_wr32(fuc, 0x004004, mclk->pll);
+       ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
+       ram_mask(fuc, 0x004000, 0x00000010, 0x00000000);
+       ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+       ram_mask(fuc, 0x004000, 0x00000010, 0x00000010);
+}
+
+static void
+nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
+{
+       struct nouveau_gpio *gpio = nouveau_gpio(fuc->base.pfb);
+       struct dcb_gpio_func func;
+       u32 reg, sh, gpio_val;
+       int ret;
+
+       if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+               ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+               if (ret)
+                       return;
+
+               nv50_gpio_location(func.line, &reg, &sh);
+               gpio_val = ram_rd32(fuc, gpioFBVREF);
+               if (gpio_val & (8 << sh))
+                       val = !val;
+
+               ram_mask(fuc, gpioFBVREF, (0x3 << sh), ((val | 0x2) << sh));
+               ram_nsec(fuc, 20000);
+       }
+}
+
 static int
 nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 {
        struct nouveau_bios *bios = nouveau_bios(pfb);
        struct nva3_ram *ram = (void *)pfb->ram;
        struct nva3_ramfuc *fuc = &ram->fuc;
+       struct nva3_ltrain *train = &ram->ltrain;
        struct nva3_clock_info mclk;
        struct nouveau_ram_data *next;
        u8  ver, hdr, cnt, len, strap;
        u32 data;
-       u32 r004018, r100760, ctrl;
+       u32 r004018, r100760, r100da0, r111100, ctrl;
        u32 unk714, unk718, unk71c;
        int ret, i;
+       u32 timing[9];
+       bool pll2pll;
 
        next = &ram->base.target;
        next->freq = freq;
        ram->base.next = next;
 
+       if (ram->ltrain.state == NVA3_TRAIN_ONCE)
+               nva3_link_train(pfb);
+
        /* lookup memory config data relevant to the target frequency */
        i = 0;
-       while ((data = nvbios_rammapEp(bios, i++, &ver, &hdr, &cnt, &len,
-                                     &next->bios))) {
-               if (freq / 1000 >= next->bios.rammap_min &&
-                   freq / 1000 <= next->bios.rammap_max)
-                       break;
-       }
-
-       if (!data || ver != 0x10 || hdr < 0x0e) {
+       data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
+                              &next->bios);
+       if (!data || ver != 0x10 || hdr < 0x05) {
                nv_error(pfb, "invalid/missing rammap entry\n");
                return -EINVAL;
        }
@@ -113,7 +539,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 
        data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
                               &ver, &hdr, &next->bios);
-       if (!data || ver != 0x10 || hdr < 0x0e) {
+       if (!data || ver != 0x10 || hdr < 0x09) {
                nv_error(pfb, "invalid/missing ramcfg entry\n");
                return -EINVAL;
        }
@@ -123,7 +549,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
                data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
                                       &ver, &hdr, &cnt, &len,
                                       &next->bios);
-               if (!data || ver != 0x10 || hdr < 0x19) {
+               if (!data || ver != 0x10 || hdr < 0x17) {
                        nv_error(pfb, "invalid/missing timing entry\n");
                        return -EINVAL;
                }
@@ -135,7 +561,32 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
                return ret;
        }
 
+       nva3_ram_timing_calc(pfb, timing);
+
        ret = ram_init(fuc, pfb);
+       if (ret)
+               return ret;
+
+       /* Determine ram-specific MR values */
+       ram->base.mr[0] = ram_rd32(fuc, mr[0]);
+       ram->base.mr[1] = ram_rd32(fuc, mr[1]);
+       ram->base.mr[2] = ram_rd32(fuc, mr[2]);
+
+       switch (ram->base.type) {
+       case NV_MEM_TYPE_DDR2:
+               ret = nouveau_sddr2_calc(&ram->base);
+               break;
+       case NV_MEM_TYPE_DDR3:
+               ret = nouveau_sddr3_calc(&ram->base);
+               break;
+       case NV_MEM_TYPE_GDDR3:
+               ret = nouveau_gddr3_calc(&ram->base);
+               break;
+       default:
+               ret = -ENOSYS;
+               break;
+       }
+
        if (ret)
                return ret;
 
@@ -143,45 +594,66 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
        if (freq <= 750000) {
                r004018 = 0x10000000;
                r100760 = 0x22222222;
+               r100da0 = 0x00000010;
        } else {
                r004018 = 0x00000000;
                r100760 = 0x00000000;
+               r100da0 = 0x00000000;
        }
 
+       if (!next->bios.ramcfg_10_DLLoff)
+               r004018 |= 0x00004000;
+
+       /* pll2pll requires switching to a safe clock first */
        ctrl = ram_rd32(fuc, 0x004000);
-       if (ctrl & 0x00000008) {
-               if (mclk.pll) {
-                       ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
-                       ram_wr32(fuc, 0x004004, mclk.pll);
-                       ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
-                       ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
-                       ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
-                       ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
-                       ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
-                       ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
-               }
-       } else {
-               u32 ssel = 0x00000101;
-               if (mclk.clk)
-                       ssel |= mclk.clk;
-               else
-                       ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-               ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
-       }
+       pll2pll = (!(ctrl & 0x00000008)) && mclk.pll;
 
+       /* Pre, NVIDIA does this outside the script */
        if (next->bios.ramcfg_10_02_10) {
                ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
        } else {
                ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
                ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
        }
+       /* Always disable this bit during reclock */
+       ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+
+       /* If switching from non-pll to pll, lock before disabling FB */
+       if (mclk.pll && !pll2pll) {
+               ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
+               nva3_ram_lock_pll(fuc, &mclk);
+       }
+
+       /* Start with disabling some CRTCs and PFIFO? */
+       ram_wait_vblank(fuc);
+       ram_wr32(fuc, 0x611200, 0x3300);
+       ram_mask(fuc, 0x002504, 0x1, 0x1);
+       ram_nsec(fuc, 10000);
+       ram_wait(fuc, 0x002504, 0x10, 0x10, 20000); /* XXX: or longer? */
+       ram_block(fuc);
+       ram_nsec(fuc, 2000);
+
+       if (!next->bios.ramcfg_10_02_10) {
+               if (ram->base.type == NV_MEM_TYPE_GDDR3)
+                       ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
+               else
+                       ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
+       }
+
+       /* If we're disabling the DLL, do it now */
+       switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
+       case NV_MEM_TYPE_DDR3:
+               nouveau_sddr3_dll_disable(fuc, ram->base.mr);
+               break;
+       case NV_MEM_TYPE_GDDR3:
+               nouveau_gddr3_dll_disable(fuc, ram->base.mr);
+               break;
+       }
 
-       if (!next->bios.rammap_10_04_02)
-               ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
-       ram_wr32(fuc, 0x611200, 0x00003300);
-       if (!next->bios.ramcfg_10_02_10)
-               ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+       if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT)
+               nva3_ram_fbvref(fuc, 0);
 
+       /* Brace RAM for impact */
        ram_wr32(fuc, 0x1002d4, 0x00000001);
        ram_wr32(fuc, 0x1002d0, 0x00000001);
        ram_wr32(fuc, 0x1002d0, 0x00000001);
@@ -189,24 +661,38 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
        ram_wr32(fuc, 0x1002dc, 0x00000001);
        ram_nsec(fuc, 2000);
 
-       ctrl = ram_rd32(fuc, 0x004000);
-       if (!(ctrl & 0x00000008) && mclk.pll) {
-               ram_wr32(fuc, 0x004000, (ctrl |=  0x00000008));
+       if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+               ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
+
+       /* Fiddle with clocks */
+       /* There are four scenarios:
+        * pll->pll: first switch to a 324MHz clock, set up new PLL, switch
+        * clk->pll: set up new PLL, switch
+        * pll->clk: set up clock, switch
+        * clk->clk: overwrite ctrl and other bits, switch */
+
+       /* Switch to regular clock - 324MHz */
+       if (pll2pll) {
+               ram_mask(fuc, 0x004000, 0x00000004, 0x00000004);
+               ram_mask(fuc, 0x004168, 0x003f3141, 0x00083101);
+               ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
                ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
                ram_wr32(fuc, 0x004018, 0x00001000);
-               ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
-               ram_wr32(fuc, 0x004004, mclk.pll);
-               ram_wr32(fuc, 0x004000, (ctrl |=  0x00000001));
-               udelay(64);
-               ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
-               udelay(20);
-       } else
-       if (!mclk.pll) {
-               ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
-               ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+               nva3_ram_lock_pll(fuc, &mclk);
+       }
+
+       if (mclk.pll) {
+               ram_mask(fuc, 0x004000, 0x00000105, 0x00000105);
+               ram_wr32(fuc, 0x004018, 0x00001000 | r004018);
+               ram_wr32(fuc, 0x100da0, r100da0);
+       } else {
+               ram_mask(fuc, 0x004168, 0x003f3141, mclk.clk | 0x00000101);
+               ram_mask(fuc, 0x004000, 0x00000108, 0x00000008);
                ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
-               ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+               ram_wr32(fuc, 0x004018, 0x00009000 | r004018);
+               ram_wr32(fuc, 0x100da0, r100da0);
        }
+       ram_nsec(fuc, 20000);
 
        if (next->bios.rammap_10_04_08) {
                ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
@@ -220,6 +706,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
                                        0x80000000);
                ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
        } else {
+               if (train->state == NVA3_TRAIN_DONE) {
+                       ram_wr32(fuc, 0x100080, 0x1020);
+                       ram_mask(fuc, 0x111400, 0xffffffff, train->r_111400);
+                       ram_mask(fuc, 0x1111e0, 0xffffffff, train->r_1111e0);
+                       ram_mask(fuc, 0x100720, 0xffffffff, train->r_100720);
+               }
                ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
                ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
                ram_mask(fuc, 0x100760, 0x22222222, r100760);
@@ -227,65 +719,131 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
                ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
        }
 
+       if (nv_device(pfb)->chipset == 0xa3 && freq > 500000)
+               ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
+
+       /* Final switch */
        if (mclk.pll) {
                ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
-               ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+               ram_mask(fuc, 0x004000, 0x00000008, 0x00000000);
        }
 
-       /*XXX: LEAVE */
        ram_wr32(fuc, 0x1002dc, 0x00000000);
        ram_wr32(fuc, 0x1002d4, 0x00000001);
        ram_wr32(fuc, 0x100210, 0x80000000);
-       ram_nsec(fuc, 1000);
-       ram_nsec(fuc, 1000);
+       ram_nsec(fuc, 2000);
 
-       ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
-       ram_nsec(fuc, 1000);
-       ram_nuke(fuc, mr[0]);
-       ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
-       ram_nsec(fuc, 1000);
+       /* Set RAM MR parameters and timings */
+       for (i = 2; i >= 0; i--) {
+               if (ram_rd32(fuc, mr[i]) != ram->base.mr[i]) {
+                       ram_wr32(fuc, mr[i], ram->base.mr[i]);
+                       ram_nsec(fuc, 1000);
+               }
+       }
 
-       ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
-       ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+       ram_wr32(fuc, 0x100220[3], timing[3]);
+       ram_wr32(fuc, 0x100220[1], timing[1]);
+       ram_wr32(fuc, 0x100220[6], timing[6]);
+       ram_wr32(fuc, 0x100220[7], timing[7]);
+       ram_wr32(fuc, 0x100220[2], timing[2]);
+       ram_wr32(fuc, 0x100220[4], timing[4]);
+       ram_wr32(fuc, 0x100220[5], timing[5]);
+       ram_wr32(fuc, 0x100220[0], timing[0]);
+       ram_wr32(fuc, 0x100220[8], timing[8]);
 
+       /* Misc */
        ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
 
-       unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
-       unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
-       unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+       /* XXX: A lot of "chipset"/"ram type" specific stuff...? */
+       unk714  = ram_rd32(fuc, 0x100714) & ~0xf0000130;
+       unk718  = ram_rd32(fuc, 0x100718) & ~0x00000100;
+       unk71c  = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+       r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
+
+       if (next->bios.ramcfg_10_02_04) {
+               switch (ram->base.type) {
+               case NV_MEM_TYPE_DDR3:
+                       if (nv_device(pfb)->chipset != 0xa8)
+                               r111100 |= 0x00000004;
+                       /* no break */
+               case NV_MEM_TYPE_DDR2:
+                       r111100 |= 0x08000000;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               switch (ram->base.type) {
+               case NV_MEM_TYPE_DDR2:
+                       r111100 |= 0x1a800000;
+                       unk714  |= 0x00000010;
+                       break;
+               case NV_MEM_TYPE_DDR3:
+                       if (nv_device(pfb)->chipset == 0xa8) {
+                               r111100 |=  0x08000000;
+                       } else {
+                               r111100 &= ~0x00000004;
+                               r111100 |=  0x12800000;
+                       }
+                       unk714  |= 0x00000010;
+                       break;
+               case NV_MEM_TYPE_GDDR3:
+                       r111100 |= 0x30000000;
+                       unk714  |= 0x00000020;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       unk714 |= (next->bios.ramcfg_10_04_01) << 8;
+
        if (next->bios.ramcfg_10_02_20)
                unk714 |= 0xf0000000;
-       if (!next->bios.ramcfg_10_02_04)
-               unk714 |= 0x00000010;
-       ram_wr32(fuc, 0x100714, unk714);
-
+       if (next->bios.ramcfg_10_02_02)
+               unk718 |= 0x00000100;
        if (next->bios.ramcfg_10_02_01)
                unk71c |= 0x00000100;
-       ram_wr32(fuc, 0x10071c, unk71c);
+       if (next->bios.timing_10_24 != 0xff) {
+               unk718 &= ~0xf0000000;
+               unk718 |= next->bios.timing_10_24 << 28;
+       }
+       if (next->bios.ramcfg_10_02_10)
+               r111100 &= ~0x04020000;
 
-       if (next->bios.ramcfg_10_02_02)
-               unk718 |= 0x00000100;
-       ram_wr32(fuc, 0x100718, unk718);
+       ram_mask(fuc, 0x100714, 0xffffffff, unk714);
+       ram_mask(fuc, 0x10071c, 0xffffffff, unk71c);
+       ram_mask(fuc, 0x100718, 0xffffffff, unk718);
+       ram_mask(fuc, 0x111100, 0xffffffff, r111100);
 
-       if (next->bios.ramcfg_10_02_10)
-               ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+       if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT)
+               nva3_ram_fbvref(fuc, 1);
 
-       ram_mask(fuc, mr[0], 0x100, 0x100);
-       ram_nsec(fuc, 1000);
-       ram_mask(fuc, mr[0], 0x100, 0x000);
-       ram_nsec(fuc, 1000);
+       /* Reset DLL */
+       if (!next->bios.ramcfg_10_DLLoff)
+               nouveau_sddr2_dll_reset(fuc);
 
-       ram_nsec(fuc, 2000);
-       ram_nsec(fuc, 12000);
+       if (ram->base.type == NV_MEM_TYPE_GDDR3)
+               ram_nsec(fuc, 31000);
+       else
+               ram_nsec(fuc, 14000);
+
+       if (ram->base.type == NV_MEM_TYPE_DDR3) {
+               ram_wr32(fuc, 0x100264, 0x1);
+               ram_nsec(fuc, 2000);
+       }
 
-       ram_wr32(fuc, 0x611200, 0x00003330);
+       ram_nuke(fuc, 0x100700);
+       ram_mask(fuc, 0x100700, 0x01000000, 0x01000000);
+       ram_mask(fuc, 0x100700, 0x01000000, 0x00000000);
+
+       /* Re-enable FB */
+       ram_unblock(fuc);
+       ram_wr32(fuc, 0x611200, 0x3330);
+
+       /* Post fiddlings */
        if (next->bios.rammap_10_04_02)
                ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
        if (next->bios.ramcfg_10_02_10) {
@@ -313,7 +871,22 @@ nva3_ram_prog(struct nouveau_fb *pfb)
        struct nouveau_device *device = nv_device(pfb);
        struct nva3_ram *ram = (void *)pfb->ram;
        struct nva3_ramfuc *fuc = &ram->fuc;
-       ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
+       bool exec = nouveau_boolopt(device->cfgopt, "NvMemExec", true);
+
+       if (exec) {
+               nv_mask(pfb, 0x001534, 0x2, 0x2);
+
+               ram_exec(fuc, true);
+
+               /* Post-processing, avoids flicker */
+               nv_mask(pfb, 0x002504, 0x1, 0x0);
+               nv_mask(pfb, 0x001534, 0x2, 0x0);
+
+               nv_mask(pfb, 0x616308, 0x10, 0x10);
+               nv_mask(pfb, 0x616b08, 0x10, 0x10);
+       } else {
+               ram_exec(fuc, false);
+       }
        return 0;
 }
 
@@ -330,38 +903,24 @@ nva3_ram_init(struct nouveau_object *object)
 {
        struct nouveau_fb *pfb = (void *)object->parent;
        struct nva3_ram   *ram = (void *)object;
-       int ret, i;
+       int ret;
 
        ret = nouveau_ram_init(&ram->base);
        if (ret)
                return ret;
 
-       /* prepare for ddr link training, and load training patterns */
-       switch (ram->base.type) {
-       case NV_MEM_TYPE_DDR3: {
-               if (nv_device(pfb)->chipset == 0xa8) {
-                       static const u32 pattern[16] = {
-                               0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
-                               0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
-                               0x33333333, 0x55555555, 0x77777777, 0x66666666,
-                               0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
-                       };
-
-                       nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
-                       nv_wr32(pfb, 0x1005a8, 0x0000ffff);
-                       nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
-                       for (i = 0; i < 0x30; i++) {
-                               nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
-                               nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
-                               nv_wr32(pfb, 0x10f900, pattern[i % 16]);
-                               nv_wr32(pfb, 0x10f920, pattern[i % 16]);
-                       }
-               }
-       }
-               break;
-       default:
-               break;
-       }
+       nva3_link_train_init(pfb);
+
+       return 0;
+}
+
+static int
+nva3_ram_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_fb *pfb = (void *)object->parent;
+
+       if (!suspend)
+               nva3_link_train_fini(pfb);
 
        return 0;
 }
@@ -371,8 +930,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 datasize,
              struct nouveau_object **pobject)
 {
+       struct nouveau_fb *pfb = nouveau_fb(parent);
+       struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+       struct dcb_gpio_func func;
        struct nva3_ram *ram;
        int ret, i;
+       u32 reg, shift;
 
        ret = nv50_ram_create(parent, engine, oclass, &ram);
        *pobject = nv_object(ram);
@@ -380,7 +943,9 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        switch (ram->base.type) {
+       case NV_MEM_TYPE_DDR2:
        case NV_MEM_TYPE_DDR3:
+       case NV_MEM_TYPE_GDDR3:
                ram->base.calc = nva3_ram_calc;
                ram->base.prog = nva3_ram_prog;
                ram->base.tidy = nva3_ram_tidy;
@@ -390,31 +955,41 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return 0;
        }
 
+       ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
+       ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
+       ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
        ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
        ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
        ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
        ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
        ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+       ram->fuc.r_0x100080 = ramfuc_reg(0x100080);
        ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
        ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
        for (i = 0; i < 9; i++)
                ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+       ram->fuc.r_0x100264 = ramfuc_reg(0x100264);
        ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
        ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
        ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
        ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
        ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
        ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+       ram->fuc.r_0x100700 = ramfuc_reg(0x100700);
        ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
        ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
        ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+       ram->fuc.r_0x100720 = ramfuc_reg(0x100720);
        ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
        ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
        ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
+       ram->fuc.r_0x100da0 = ramfuc_stride(0x100da0, 4, ram->base.part_mask);
        ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
        ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
        ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
        ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+       ram->fuc.r_0x1111e0 = ramfuc_reg(0x1111e0);
+       ram->fuc.r_0x111400 = ramfuc_reg(0x111400);
        ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
 
        if (ram->base.ranks > 1) {
@@ -429,6 +1004,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
        }
 
+       ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+       if (ret == 0) {
+               nv50_gpio_location(func.line, &reg, &shift);
+               ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
+       }
+
        return 0;
 }
 
@@ -438,6 +1019,6 @@ nva3_ram_oclass = {
                .ctor = nva3_ram_ctor,
                .dtor = _nouveau_ram_dtor,
                .init = nva3_ram_init,
-               .fini = _nouveau_ram_fini,
+               .fini = nva3_ram_fini,
        },
 };
index bb1eb8f3e63918bdbcc82c5eb0b97af3370b7879..252575f3aa290ff4152d507d0de7157aeed407ea 100644 (file)
@@ -66,7 +66,7 @@ nouveau_sddr2_calc(struct nouveau_ram *ram)
        case 0x10:
                CL  = ram->next->bios.timing_10_CL;
                WR  = ram->next->bios.timing_10_WR;
-               DLL = !ram->next->bios.ramcfg_10_02_40;
+               DLL = !ram->next->bios.ramcfg_10_DLLoff;
                ODT = ram->next->bios.timing_10_ODT & 3;
                break;
        case 0x20:
index 83949b11833af46ca1537904691ca7d13112133a..a2dca4869e52f49ba9e012b19ad22e9ed86315cd 100644 (file)
@@ -80,7 +80,7 @@ nouveau_sddr3_calc(struct nouveau_ram *ram)
                CWL = ram->next->bios.timing_10_CWL;
                CL  = ram->next->bios.timing_10_CL;
                WR  = ram->next->bios.timing_10_WR;
-               DLL = !ram->next->bios.ramcfg_10_02_40;
+               DLL = !ram->next->bios.ramcfg_10_DLLoff;
                ODT = ram->next->bios.timing_10_ODT;
                break;
        case 0x20:
index 1864fa98e6b1f6e86017360f954047e7e4799637..2e30d5a62d6e023c110f4b2d41d6cba003b30e99 100644 (file)
@@ -54,7 +54,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
        }
 }
 
-static int
+int
 nv50_gpio_location(int line, u32 *reg, u32 *shift)
 {
        const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
index 2b1bf545e488df2b6b6ff99253e2cb38895de48f..0dc605db7ec8f7ab17d50c7d27ab72b1bc0d3128 100644 (file)
@@ -473,18 +473,56 @@ nouveau_i2c_extdev_sclass[] = {
        nouveau_anx9805_sclass,
 };
 
+static void
+nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
+                       struct dcb_i2c_entry *info)
+{
+       const struct nouveau_i2c_impl *impl = (void *)nv_oclass(i2c);
+       struct nouveau_oclass *oclass;
+       struct nouveau_object *parent;
+       struct nouveau_object *object;
+       int ret, pad;
+
+       if (info->share != DCB_I2C_UNUSED) {
+               pad    = info->share;
+               oclass = impl->pad_s;
+       } else {
+               if (type != DCB_I2C_NVIO_AUX)
+                       pad = 0x100 + info->drive;
+               else
+                       pad = 0x100 + info->auxch;
+               oclass = impl->pad_x;
+       }
+
+       ret = nouveau_object_ctor(NULL, nv_object(i2c), oclass, NULL, pad,
+                                &parent);
+       if (ret < 0)
+               return;
+
+       oclass = impl->sclass;
+       do {
+               ret = -EINVAL;
+               if (oclass->handle == type) {
+                       ret = nouveau_object_ctor(parent, nv_object(i2c),
+                                                 oclass, info, index,
+                                                &object);
+               }
+       } while (ret && (++oclass)->handle);
+
+       nouveau_object_ref(NULL, &parent);
+}
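The pad handle doubles as a uniqueness key: shared ports reuse the DCB share index so they attach to the same pad object, while exclusive ports get a per-drive (or per-auxch) handle offset by 0x100. A standalone sketch of that selection; the DCB constants below are stand-ins for illustration, not the real dcb.h values (not part of the patch):

	/* Standalone sketch: pad-handle choice as in nouveau_i2c_create_port. */
	#include <stdio.h>

	#define FAKE_DCB_I2C_UNUSED	0xff	/* stand-in value */
	#define FAKE_DCB_I2C_NVIO_AUX	0x05	/* stand-in value */

	int
	main(void)
	{
		unsigned int share = FAKE_DCB_I2C_UNUSED;	/* exclusive port */
		unsigned int type = FAKE_DCB_I2C_NVIO_AUX;
		unsigned int drive = 0, auxch = 2, pad;

		if (share != FAKE_DCB_I2C_UNUSED)
			pad = share;		/* shared ports reuse the share index */
		else if (type == FAKE_DCB_I2C_NVIO_AUX)
			pad = 0x100 + auxch;	/* exclusive aux pad */
		else
			pad = 0x100 + drive;	/* exclusive bit-bang pad */

		printf("pad handle 0x%03x\n", pad);	/* prints 0x102 */
		return 0;
	}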
+
 int
 nouveau_i2c_create_(struct nouveau_object *parent,
                    struct nouveau_object *engine,
                    struct nouveau_oclass *oclass,
                    int length, void **pobject)
 {
-       const struct nouveau_i2c_impl *impl = (void *)oclass;
        struct nouveau_bios *bios = nouveau_bios(parent);
        struct nouveau_i2c *i2c;
        struct nouveau_object *object;
        struct dcb_i2c_entry info;
-       int ret, i, j, index = -1, pad;
+       int ret, i, j, index = -1;
        struct dcb_output outp;
        u8  ver, hdr;
        u32 data;
@@ -507,43 +545,40 @@ nouveau_i2c_create_(struct nouveau_object *parent,
        INIT_LIST_HEAD(&i2c->ports);
 
        while (!dcb_i2c_parse(bios, ++index, &info)) {
-               if (info.type == DCB_I2C_UNUSED)
+               switch (info.type) {
+               case DCB_I2C_NV04_BIT:
+               case DCB_I2C_NV4E_BIT:
+               case DCB_I2C_NVIO_BIT:
+                       nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+                                               info.type, &info);
+                       break;
+               case DCB_I2C_NVIO_AUX:
+                       nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+                                               info.type, &info);
+                       break;
+               case DCB_I2C_PMGR:
+                       if (info.drive != DCB_I2C_UNUSED) {
+                               nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+                                                       DCB_I2C_NVIO_BIT,
+                                                       &info);
+                       }
+                       if (info.auxch != DCB_I2C_UNUSED) {
+                               nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+                                                       DCB_I2C_NVIO_AUX,
+                                                       &info);
+                       }
+                       break;
+               case DCB_I2C_UNUSED:
+               default:
                        continue;
-
-               if (info.share != DCB_I2C_UNUSED) {
-                       if (info.type == DCB_I2C_NVIO_AUX)
-                               pad = info.drive;
-                       else
-                               pad = info.share;
-                       oclass = impl->pad_s;
-               } else {
-                       pad = 0x100 + info.drive;
-                       oclass = impl->pad_x;
                }
-
-               ret = nouveau_object_ctor(NULL, *pobject, oclass,
-                                         NULL, pad, &parent);
-               if (ret < 0)
-                       continue;
-
-               oclass = impl->sclass;
-               do {
-                       ret = -EINVAL;
-                       if (oclass->handle == info.type) {
-                               ret = nouveau_object_ctor(parent, *pobject,
-                                                         oclass, &info,
-                                                         index, &object);
-                       }
-               } while (ret && (++oclass)->handle);
-
-               nouveau_object_ref(NULL, &parent);
        }
 
        /* in addition to the busses specified in the i2c table, there
         * may be ddc/aux channels hiding behind external tmds/dp/etc
         * transmitters.
         */
-       index = ((index + 0x0f) / 0x10) * 0x10;
+       index = NV_I2C_EXT(0);
        i = -1;
        while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
                if (!outp.location || !outp.extdev)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
new file mode 100644 (file)
index 0000000..06a2b87
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+       nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+       const u32 unksel = 1; /* unclear which to use, or if it matters... */
+       const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+       const u32 urep = unksel ? 0x01000000 : 0x02000000;
+       u32 ctrl, timeout;
+
+       /* wait up to 1ms for any previous transaction to be done... */
+       timeout = 1000;
+       do {
+               ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+               udelay(1);
+               if (!timeout--) {
+                       AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+                       return -EBUSY;
+               }
+       } while (ctrl & 0x03010000);
+
+       /* set some magic, and wait up to 1ms for it to appear */
+       nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
+       timeout = 1000;
+       do {
+               ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+               udelay(1);
+               if (!timeout--) {
+                       AUX_ERR("magic wait 0x%08x\n", ctrl);
+                       auxch_fini(aux, ch);
+                       return -EBUSY;
+               }
+       } while ((ctrl & 0x03000000) != urep);
+
+       return 0;
+}
+
+int
+gm204_aux(struct nouveau_i2c_port *base, bool retry,
+        u8 type, u32 addr, u8 *data, u8 size)
+{
+       struct nouveau_i2c *aux = nouveau_i2c(base);
+       struct nv50_i2c_port *port = (void *)base;
+       u32 ctrl, stat, timeout, retries;
+       u32 xbuf[4] = {};
+       int ch = port->addr;
+       int ret, i;
+
+       AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+       ret = auxch_init(aux, ch);
+       if (ret)
+               goto out;
+
+       stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
+       if (!(stat & 0x10000000)) {
+               AUX_DBG("sink not detected\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       if (!(type & 1)) {
+               memcpy(xbuf, data, size);
+               for (i = 0; i < 16; i += 4) {
+                       AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+                       nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
+               }
+       }
+
+       ctrl  = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+       ctrl &= ~0x0001f0ff;
+       ctrl |= type << 12;
+       ctrl |= size - 1;
+       nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
+
+       /* (maybe) retry transaction a number of times on failure... */
+       for (retries = 0; !ret && retries < 32; retries++) {
+               /* reset, and delay a while if this is a retry */
+               nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
+               nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
+               if (retries)
+                       udelay(400);
+
+               /* transaction request, wait up to 1ms for it to complete */
+               nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
+
+               timeout = 1000;
+               do {
+                       ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+                       udelay(1);
+                       if (!timeout--) {
+                               AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+                               ret = -EIO;
+                               goto out;
+                       }
+               } while (ctrl & 0x00010000);
+               ret = 1;
+
+               /* read status, and check if transaction completed ok */
+               stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
+               if ((stat & 0x000f0000) == 0x00080000 ||
+                   (stat & 0x000f0000) == 0x00020000)
+                       ret = retry ? 0 : 1;
+               if ((stat & 0x00000100))
+                       ret = -ETIMEDOUT;
+               if ((stat & 0x00000e00))
+                       ret = -EIO;
+
+               AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+       }
+
+       if (type & 1) {
+               for (i = 0; i < 16; i += 4) {
+                       xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
+                       AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+               }
+               memcpy(data, xbuf, size);
+       }
+
+out:
+       auxch_fini(aux, ch);
+       return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
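
A minimal caller sketch, not part of the patch itself: the transaction
type values below follow the convention of nouveau's generic AUX
helpers (8 for a native write, 9 for a native read; gm204_aux() above
keys the read path off bit 0 of 'type'). The helper name and the DPCD
offset are illustrative assumptions.

	/* hypothetical: read the DPCD revision byte via the new hook */
	static int
	example_rdaux_dpcd_rev(struct nouveau_i2c_port *port, u8 *rev)
	{
		return gm204_aux(port, true, 9, 0x0000, rev, 1);
	}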
+
+static const struct nouveau_i2c_func
+gm204_aux_func = {
+       .aux       = gm204_aux,
+};
+
+int
+gm204_aux_port_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 index,
+                   struct nouveau_object **pobject)
+{
+       struct dcb_i2c_entry *info = data;
+       struct nv50_i2c_port *port;
+       int ret;
+
+       ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+                                     &nouveau_i2c_aux_algo, &gm204_aux_func,
+                                     &port);
+       *pobject = nv_object(port);
+       if (ret)
+               return ret;
+
+       port->base.aux = info->auxch;
+       port->addr = info->auxch;
+       return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_sclass[] = {
+       { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+         .ofuncs = &(struct nouveau_ofuncs) {
+                 .ctor = nvd0_i2c_port_ctor,
+                 .dtor = _nouveau_i2c_port_dtor,
+                 .init = nv50_i2c_port_init,
+                 .fini = _nouveau_i2c_port_fini,
+         },
+       },
+       { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+         .ofuncs = &(struct nouveau_ofuncs) {
+                 .ctor = gm204_aux_port_ctor,
+                 .dtor = _nouveau_i2c_port_dtor,
+                 .init = _nouveau_i2c_port_init,
+                 .fini = _nouveau_i2c_port_fini,
+         },
+       },
+       {}
+};
+
+struct nouveau_oclass *
+gm204_i2c_oclass = &(struct nouveau_i2c_impl) {
+       .base.handle = NV_SUBDEV(I2C, 0x24),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_i2c_ctor,
+               .dtor = _nouveau_i2c_dtor,
+               .init = _nouveau_i2c_init,
+               .fini = _nouveau_i2c_fini,
+       },
+       .sclass = gm204_i2c_sclass,
+       .pad_x = &nv04_i2c_pad_oclass,
+       .pad_s = &gm204_i2c_pad_oclass,
+       .aux = 8,
+       .aux_stat = nve0_aux_stat,
+       .aux_mask = nve0_aux_mask,
+}.base;
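
A hookup sketch, assuming that (as with the existing i2c
implementations) a per-chipset device table elsewhere in the series
selects this oclass; the device file is not part of this hunk, so the
exact wiring is an assumption:

	/* illustrative only: point a GM204 board's I2C subdev at the
	 * new implementation */
	device->oclass[NVDEV_SUBDEV_I2C] = gm204_i2c_oclass;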
index 5d2a77421c7408f909018a9ef46d0d041208b223..9ef965692fb15e8334a4b5ca06f8b3adf8fa638e 100644 (file)
@@ -10,8 +10,6 @@ struct nv50_i2c_priv {
 struct nv50_i2c_port {
        struct nouveau_i2c_port base;
        u32 addr;
-       u32 ctrl;
-       u32 data;
        u32 state;
 };
 
@@ -29,4 +27,8 @@ int  nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
 void nv94_i2c_acquire(struct nouveau_i2c_port *);
 void nv94_i2c_release(struct nouveau_i2c_port *);
 
+int  nvd0_i2c_port_ctor(struct nouveau_object *, struct nouveau_object *,
+                       struct nouveau_oclass *, void *, u32,
+                       struct nouveau_object **);
+
 #endif
index f59c3a25546285dfe9304cbc7a86b5319d56a836..e383ee81f4d232dec342b43b0008e0e2867a9324 100644 (file)
@@ -214,10 +214,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
        port->state = 7;
        port->addr = nv50_i2c_addr[info->drive];
-       if (info->share != DCB_I2C_UNUSED) {
-               port->ctrl = 0x00e500 + (info->share * 0x50);
-               port->data = 0x0000e001;
-       }
        return 0;
 }
 
@@ -242,13 +238,8 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       port->base.aux = info->drive;
-       port->addr = info->drive;
-       if (info->share != DCB_I2C_UNUSED) {
-               port->ctrl = 0x00e500 + (info->drive * 0x50);
-               port->data = 0x00002002;
-       }
-
+       port->base.aux = info->auxch;
+       port->addr = info->auxch;
        return 0;
 }
 
index 364ddb1c5f034d527d0f4e8566018ca242e14c10..fd99380502ec5fc679c74b8c378fdfe2cbfeaa6e 100644 (file)
@@ -48,7 +48,7 @@ nvd0_i2c_func = {
        .sense_sda = nvd0_i2c_sense_sda,
 };
 
-static int
+int
 nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                   struct nouveau_oclass *oclass, void *data, u32 index,
                   struct nouveau_object **pobject)
@@ -66,10 +66,6 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
        port->state = 0x00000007;
        port->addr = 0x00d014 + (info->drive * 0x20);
-       if (info->share != DCB_I2C_UNUSED) {
-               port->ctrl = 0x00e500 + (info->share * 0x50);
-               port->data = 0x0000e001;
-       }
        return 0;
 }
 
index cae77e1ad8dc9b24c689b3b29669d946372dbcdf..25fe5c2d110e120baafe2a1f310f838c646bd7f0 100644 (file)
@@ -24,7 +24,7 @@
 
 #include "nv50.h"
 
-static void
+void
 nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
        u32 intr = nv_rd32(i2c, 0x00dc60);
@@ -38,7 +38,7 @@ nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
        nv_wr32(i2c, 0x00dc60, intr);
 }
 
-static void
+void
 nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
 {
        u32 temp = nv_rd32(i2c, 0x00dc68), i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
new file mode 100644 (file)
index 0000000..f0e6fbb
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+struct gm204_i2c_pad {
+       struct nvkm_i2c_pad base;
+       int addr;
+};
+
+static int
+gm204_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_i2c *i2c = (void *)object->engine;
+       struct gm204_i2c_pad *pad = (void *)object;
+       nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
+       return nvkm_i2c_pad_fini(&pad->base, suspend);
+}
+
+static int
+gm204_i2c_pad_init(struct nouveau_object *object)
+{
+       struct nouveau_i2c *i2c = (void *)object->engine;
+       struct gm204_i2c_pad *pad = (void *)object;
+
+       switch (nv_oclass(pad->base.next)->handle) {
+       case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
+               nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+               break;
+       case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+       default:
+               nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+               break;
+       }
+
+       nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
+       return nvkm_i2c_pad_init(&pad->base);
+}
+
+static int
+gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+                 struct nouveau_oclass *oclass, void *data, u32 index,
+                 struct nouveau_object **pobject)
+{
+       struct gm204_i2c_pad *pad;
+       int ret;
+
+       ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
+       *pobject = nv_object(pad);
+       if (ret)
+               return ret;
+
+       pad->addr = index * 0x50;
+       return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_pad_oclass = {
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = gm204_i2c_pad_ctor,
+               .dtor = _nvkm_i2c_pad_dtor,
+               .init = gm204_i2c_pad_init,
+               .fini = gm204_i2c_pad_fini,
+       },
+};
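
An observation rather than anything stated in the patch: with
pad->addr = index * 0x50, the pads use the same 0x50 register stride
as the AUX channels in gm204_aux(). Assuming pad and channel indices
correspond, the registers touched above can be read as:

	/* illustrative: per-pad registers on GM204 */
	u32 mode    = 0x00d970 + pad->addr; /* AUX vs bit-bang select */
	u32 disable = 0x00d97c + pad->addr; /* bit 0: set on fini, cleared on init */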
index 780090b6425a19026b4302962d87d04291b9c34f..4fe7ae3fde4ef0b2fe9695841c3315e7f007e035 100644 (file)
@@ -5,6 +5,7 @@
 
 extern struct nouveau_oclass nv04_i2c_pad_oclass;
 extern struct nouveau_oclass nv94_i2c_pad_oclass;
+extern struct nouveau_oclass gm204_i2c_pad_oclass;
 
 #define nouveau_i2c_port_create(p,e,o,i,a,f,d)                                 \
        nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f),                 \
@@ -82,4 +83,7 @@ struct nouveau_i2c_impl {
 void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
 void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
 
+void nve0_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
+void nve0_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+
 #endif
index e89789a53b80101721e1c5e6ea2589edfb642a94..ec03f9a4290b4d72bb796655061f9fe12b011d68 100644 (file)
@@ -50,6 +50,7 @@ handler(WR32  , 0x0000, 0x0002, #memx_func_wr32)
 handler(WAIT  , 0x0004, 0x0000, #memx_func_wait)
 handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
 handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank)
+handler(TRAIN , 0x0000, 0x0000, #memx_func_train)
 memx_func_tail:
 
 .equ #memx_func_size #memx_func_next - #memx_func_head
@@ -63,6 +64,10 @@ memx_ts_end:
 memx_data_head:
 .skip 0x0800
 memx_data_tail:
+
+memx_train_head:
+.skip 0x0100
+memx_train_tail:
 #endif
 
 /******************************************************************************
@@ -257,6 +262,101 @@ memx_func_delay:
        call(nsec)
        ret
 
+// description
+//
+// $r15 - current (memx)
+// $r4  - packet length
+// $r3  - opcode description
+// $r0  - zero
+memx_func_train:
+#if NVKM_PPWR_CHIPSET == GT215
+// $r5 - outer loop counter
+// $r6 - inner loop counter
+// $r7 - entry counter (#memx_train_head + $r7)
+       movw $r5 0x3
+       movw $r7 0x0
+
+// Dummy read from 0x700000, presumably to wake up the memory interface
+       imm32($r9, 0x700000)
+       nv_rd32($r8,$r9)
+       movw $r14 0x2710
+       call(nsec)
+
+       memx_func_train_loop_outer:
+               mulu $r8 $r5 0x101
+               sethi $r8 0x02000000
+               imm32($r9, 0x1111e0)
+               nv_wr32($r9, $r8)
+               push $r5
+
+               movw $r6 0x0
+               memx_func_train_loop_inner:
+                       movw $r8 0x1111
+                       mulu $r9 $r6 $r8
+                       shl b32 $r8 $r9 0x10
+                       or $r8 $r9
+                       imm32($r9, 0x100720)
+                       nv_wr32($r9, $r8)
+
+                       imm32($r9, 0x100080)
+                       nv_rd32($r8, $r9)
+                       or $r8 $r8 0x20
+                       nv_wr32($r9, $r8)
+
+                       imm32($r9, 0x10053c)
+                       imm32($r8, 0x80003002)
+                       nv_wr32($r9, $r8)
+
+                       imm32($r14, 0x100560)
+                       imm32($r13, 0x80000000)
+                       add b32 $r12 $r13 0
+                       imm32($r11, 0x001e8480)
+                       call(wait)
+
+                       // $r5 - innermost loop counter (outer $r5 saved on the stack)
+                       // $r9 - result
+                       movw $r5 0
+                       imm32($r9, 0x8300ffff)
+                       memx_func_train_loop_4x:
+                               imm32($r10, 0x100080)
+                               nv_rd32($r8, $r10)
+                               imm32($r11, 0xffffffdf)
+                               and $r8 $r11
+                               nv_wr32($r10, $r8)
+
+                               imm32($r10, 0x10053c)
+                               imm32($r8, 0x80003002)
+                               nv_wr32($r10, $r8)
+
+                               imm32($r14, 0x100560)
+                               imm32($r13, 0x80000000)
+                               mov b32 $r12 $r13
+                               imm32($r11, 0x00002710)
+                               call(wait)
+
+                               nv_rd32($r13, $r14)
+                               and $r9 $r9 $r13
+
+                               add b32 $r5 1
+                               cmp b16 $r5 0x4
+                               bra l #memx_func_train_loop_4x
+
+                       add b32 $r10 $r7 #memx_train_head
+                       st b32 D[$r10 + 0] $r9
+                       add b32 $r6 1
+                       add b32 $r7 4
+
+                       cmp b16 $r6 0x10
+                       bra l #memx_func_train_loop_inner
+
+               pop $r5
+               add b32 $r5 1
+               cmp b16 $r5 7
+               bra l #memx_func_train_loop_outer
+
+#endif
+       ret
+
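
In rough C terms, the GT215 training loop above behaves as follows.
This is an illustrative rendering only: register semantics are
inferred from the assembly, rd32()/wr32() stand in for the firmware's
nv_rd32()/nv_wr32() macros, and wait_bit31() abbreviates the
call(wait) polls on 0x100560 bit 31.

	static void
	memx_train_rendering(u32 *out)	/* 64 words: the memx_train buffer */
	{
		u32 outer, inner, v, score;
		int pass;

		for (outer = 3; outer < 7; outer++) {
			wr32(0x1111e0, 0x02000000 | (outer * 0x101));
			for (inner = 0; inner < 0x10; inner++) {
				v = inner * 0x1111;
				score = 0x8300ffff;
				wr32(0x100720, (v << 16) | v);
				wr32(0x100080, rd32(0x100080) | 0x20);
				wr32(0x10053c, 0x80003002);
				wait_bit31(0x100560);	/* ~2ms timeout */
				for (pass = 0; pass < 4; pass++) {
					wr32(0x100080, rd32(0x100080) & ~0x20);
					wr32(0x10053c, 0x80003002);
					wait_bit31(0x100560);	/* ~10us timeout */
					score &= rd32(0x100560);
				}
				*out++ = score;	/* 4 outer x 16 inner = 64 results */
			}
		}
	}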
 // description
 //
 // $r15 - current (memx)
@@ -307,8 +407,19 @@ memx_exec:
 // $r11 - data1
 // $r0  - zero
 memx_info:
+       cmp b16 $r12 0x1
+       bra e #memx_info_train
+
+       memx_info_data:
        mov $r12 #memx_data_head
        mov $r11 #memx_data_tail - #memx_data_head
+       bra #memx_info_send
+
+       memx_info_train:
+       mov $r12 #memx_train_head
+       mov $r11 #memx_train_tail - #memx_train_head
+
+       memx_info_send:
        call(send)
        ret
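
With the branch added above, memx_info now serves two regions,
selected by the value the caller passes in $r12; in C terms it is
roughly (illustrative only):

	/* arg arrives in $r12; 1 requests the training results */
	if (arg == 1)
		send(memx_train_head, memx_train_tail - memx_train_head);
	else
		send(memx_data_head, memx_data_tail - memx_data_head);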
 
index 4d278a96b2bbb8459ff0517210967fe4d86dfc01..713e11e2953db613d871c2687ba6862b9fbc6470 100644 (file)
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x584d454d,
-       0x0000061c,
-       0x0000060e,
+       0x0000062d,
+       0x0000061f,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x46524550,
-       0x00000620,
-       0x0000061e,
+       0x00000631,
+       0x0000062f,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x5f433249,
-       0x00000a24,
-       0x000008cb,
+       0x00000a35,
+       0x000008dc,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x54534554,
-       0x00000a45,
-       0x00000a26,
+       0x00000a56,
+       0x00000a37,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000a50,
-       0x00000a4e,
+       0x00000a61,
+       0x00000a5f,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -246,13 +246,15 @@ uint32_t nv108_pwr_data[] = {
        0x00010006,
        0x00000000,
        0x0000057b,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+       0x00000007,
        0x00000000,
-/* 0x03bc: memx_ts_end */
+       0x000005c3,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
        0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
        0x00000000,
+/* 0x03cc: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -764,8 +766,75 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+       0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
        0x00000400,
        0x00000800,
        0x00001000,
@@ -776,7 +845,7 @@ uint32_t nv108_pwr_data[] = {
        0x00020000,
        0x00040000,
        0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
        0x00100000,
        0x00200000,
        0x00400000,
@@ -844,9 +913,6 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
 
 uint32_t nv108_pwr_code[] = {
@@ -1215,10 +1281,10 @@ uint32_t nv108_pwr_code[] = {
        0xf40464f0,
        0x2c06f70b,
        0xb50066cf,
-       0x00f8ee06,
+       0x00f8f106,
 /* 0x0500: memx_func_leave */
        0x66cf2c06,
-       0xef06b500,
+       0xf206b500,
        0xe4400406,
        0x0006f607,
 /* 0x0512: memx_func_leave_wait */
@@ -1270,370 +1336,374 @@ uint32_t nv108_pwr_code[] = {
        0x9800f800,
        0x10b6001e,
        0x005d7e04,
-/* 0x05c3: memx_exec */
-       0xf900f800,
-       0xb2d0f9e0,
-/* 0x05cb: memx_exec_next */
-       0x98b2b2c1,
-       0x10b60013,
-       0xf034e704,
-       0xe033e701,
-       0x0132b601,
-       0x980c30f0,
-       0x55f9de35,
-       0x1ef412a6,
-       0xee0b98e5,
-       0xbbef0c98,
-       0xc44b02cb,
-       0x00bbcf07,
-       0xe0fcd0fc,
-       0x0002c27e,
-/* 0x0602: memx_info */
-       0xc04c00f8,
+/* 0x05c3: memx_func_train */
+       0xf800f800,
+/* 0x05c5: memx_exec */
+       0xf9e0f900,
+       0xb2c1b2d0,
+/* 0x05cd: memx_exec_next */
+       0x001398b2,
+       0xe70410b6,
+       0xe701f034,
+       0xb601e033,
+       0x30f00132,
+       0xde35980c,
+       0x12a655f9,
+       0x98e51ef4,
+       0x0c98f10b,
+       0x02cbbbf2,
+       0xcf07c44b,
+       0xd0fc00bb,
+       0xc27ee0fc,
+       0x00f80002,
+/* 0x0604: memx_info */
+       0xf401c670,
+/* 0x060a: memx_info_data */
+       0xcc4c0c0b,
        0x08004b03,
-       0x0002c27e,
-/* 0x060e: memx_recv */
-       0xd6b000f8,
-       0xb20bf401,
-       0xf400d6b0,
-       0x00f8eb0b,
-/* 0x061c: memx_init */
-/* 0x061e: perf_recv */
-       0x00f800f8,
-/* 0x0620: perf_init */
-/* 0x0622: i2c_drive_scl */
-       0x36b000f8,
-       0x0d0bf400,
-       0xf607e040,
-       0x04bd0001,
-/* 0x0632: i2c_drive_scl_lo */
-       0xe44000f8,
-       0x0001f607,
-       0x00f804bd,
-/* 0x063c: i2c_drive_sda */
-       0xf40036b0,
-       0xe0400d0b,
-       0x0002f607,
-       0x00f804bd,
-/* 0x064c: i2c_drive_sda_lo */
-       0xf607e440,
-       0x04bd0002,
-/* 0x0656: i2c_sense_scl */
-       0x32f400f8,
-       0x07c44301,
-       0xfd0033cf,
-       0x0bf40431,
-       0x0131f406,
-/* 0x0668: i2c_sense_scl_done */
-/* 0x066a: i2c_sense_sda */
-       0x32f400f8,
-       0x07c44301,
-       0xfd0033cf,
-       0x0bf40432,
-       0x0131f406,
-/* 0x067c: i2c_sense_sda_done */
-/* 0x067e: i2c_raise_scl */
-       0x40f900f8,
-       0x03089844,
-       0x06227e01,
-/* 0x0689: i2c_raise_scl_wait */
-       0x03e84e00,
-       0x00005d7e,
-       0x0006567e,
-       0xb60901f4,
-       0x1bf40142,
-/* 0x069d: i2c_raise_scl_done */
-       0xf840fcef,
-/* 0x06a1: i2c_start */
-       0x06567e00,
-       0x0d11f400,
-       0x00066a7e,
-       0xf40611f4,
-/* 0x06b2: i2c_start_rep */
-       0x00032e0e,
-       0x0006227e,
-       0x3c7e0103,
-       0x76bb0006,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0x7e50fc04,
-       0xb600067e,
-       0x11f40464,
-/* 0x06dd: i2c_start_send */
-       0x7e00031d,
-       0x4e00063c,
-       0x5d7e1388,
-       0x00030000,
-       0x0006227e,
-       0x7e13884e,
-/* 0x06f7: i2c_start_out */
-       0xf800005d,
-/* 0x06f9: i2c_stop */
-       0x7e000300,
-       0x03000622,
-       0x063c7e00,
-       0x03e84e00,
-       0x00005d7e,
-       0x227e0103,
-       0x884e0006,
-       0x005d7e13,
+/* 0x0613: memx_info_train */
+       0x4c090ef4,
+       0x004b0bcc,
+/* 0x0619: memx_info_send */
+       0x02c27e01,
+/* 0x061f: memx_recv */
+       0xb000f800,
+       0x0bf401d6,
+       0x00d6b0a3,
+       0xf8dc0bf4,
+/* 0x062d: memx_init */
+/* 0x062f: perf_recv */
+       0xf800f800,
+/* 0x0631: perf_init */
+/* 0x0633: i2c_drive_scl */
+       0xb000f800,
+       0x0bf40036,
+       0x07e0400d,
+       0xbd0001f6,
+/* 0x0643: i2c_drive_scl_lo */
+       0x4000f804,
+       0x01f607e4,
+       0xf804bd00,
+/* 0x064d: i2c_drive_sda */
+       0x0036b000,
+       0x400d0bf4,
+       0x02f607e0,
+       0xf804bd00,
+/* 0x065d: i2c_drive_sda_lo */
+       0x07e44000,
+       0xbd0002f6,
+/* 0x0667: i2c_sense_scl */
+       0xf400f804,
+       0xc4430132,
+       0x0033cf07,
+       0xf40431fd,
+       0x31f4060b,
+/* 0x0679: i2c_sense_scl_done */
+/* 0x067b: i2c_sense_sda */
+       0xf400f801,
+       0xc4430132,
+       0x0033cf07,
+       0xf40432fd,
+       0x31f4060b,
+/* 0x068d: i2c_sense_sda_done */
+/* 0x068f: i2c_raise_scl */
+       0xf900f801,
+       0x08984440,
+       0x337e0103,
+/* 0x069a: i2c_raise_scl_wait */
+       0xe84e0006,
+       0x005d7e03,
+       0x06677e00,
+       0x0901f400,
+       0xf40142b6,
+/* 0x06ae: i2c_raise_scl_done */
+       0x40fcef1b,
+/* 0x06b2: i2c_start */
+       0x677e00f8,
+       0x11f40006,
+       0x067b7e0d,
+       0x0611f400,
+/* 0x06c3: i2c_start_rep */
+       0x032e0ef4,
+       0x06337e00,
        0x7e010300,
-       0x4e00063c,
-       0x5d7e1388,
-       0x00f80000,
-/* 0x0728: i2c_bitw */
-       0x00063c7e,
-       0x7e03e84e,
-       0xbb00005d,
+       0xbb00064d,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x00067e7e,
+       0x00068f7e,
        0xf40464b6,
-       0x884e1711,
-       0x005d7e13,
-       0x7e000300,
-       0x4e000622,
-       0x5d7e1388,
-/* 0x0766: i2c_bitw_out */
-       0x00f80000,
-/* 0x0768: i2c_bitr */
-       0x3c7e0103,
+/* 0x06ee: i2c_start_send */
+       0x00031d11,
+       0x00064d7e,
+       0x7e13884e,
+       0x0300005d,
+       0x06337e00,
+       0x13884e00,
+       0x00005d7e,
+/* 0x0708: i2c_start_out */
+/* 0x070a: i2c_stop */
+       0x000300f8,
+       0x0006337e,
+       0x4d7e0003,
        0xe84e0006,
        0x005d7e03,
-       0x0076bb00,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x7e7e50fc,
-       0x64b60006,
-       0x1a11f404,
-       0x00066a7e,
-       0x227e0003,
-       0x884e0006,
-       0x005d7e13,
-       0x013cf000,
-/* 0x07ab: i2c_bitr_done */
-       0xf80131f4,
-/* 0x07ad: i2c_get_byte */
-       0x04000500,
-/* 0x07b1: i2c_get_byte_next */
-       0x0154b608,
+       0x7e010300,
+       0x4e000633,
+       0x5d7e1388,
+       0x01030000,
+       0x00064d7e,
+       0x7e13884e,
+       0xf800005d,
+/* 0x0739: i2c_bitw */
+       0x064d7e00,
+       0x03e84e00,
+       0x00005d7e,
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0x07687e50,
+       0x068f7e50,
        0x0464b600,
-       0xfd2a11f4,
-       0x42b60553,
-       0xd81bf401,
-       0x76bb0103,
+       0x4e1711f4,
+       0x5d7e1388,
+       0x00030000,
+       0x0006337e,
+       0x7e13884e,
+/* 0x0777: i2c_bitw_out */
+       0xf800005d,
+/* 0x0779: i2c_bitr */
+       0x7e010300,
+       0x4e00064d,
+       0x5d7e03e8,
+       0x76bb0000,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0x7e50fc04,
-       0xb6000728,
-/* 0x07fa: i2c_get_byte_done */
-       0x00f80464,
-/* 0x07fc: i2c_put_byte */
-/* 0x07fe: i2c_put_byte_next */
-       0x42b60804,
-       0x3854ff01,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0x07287e50,
-       0x0464b600,
-       0xb03411f4,
-       0x1bf40046,
-       0x0076bbd8,
+       0xb600068f,
+       0x11f40464,
+       0x067b7e1a,
+       0x7e000300,
+       0x4e000633,
+       0x5d7e1388,
+       0x3cf00000,
+       0x0131f401,
+/* 0x07bc: i2c_bitr_done */
+/* 0x07be: i2c_get_byte */
+       0x000500f8,
+/* 0x07c2: i2c_get_byte_next */
+       0x54b60804,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x797e50fc,
+       0x64b60007,
+       0x2a11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0xbb0103d8,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x0007397e,
+/* 0x080b: i2c_get_byte_done */
+       0xf80464b6,
+/* 0x080d: i2c_put_byte */
+/* 0x080f: i2c_put_byte_next */
+       0xb6080400,
+       0x54ff0142,
+       0x0076bb38,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
-       0x687e50fc,
+       0x397e50fc,
        0x64b60007,
-       0x0f11f404,
-       0xb00076bb,
-       0x1bf40136,
-       0x0132f406,
-/* 0x0854: i2c_put_byte_done */
-/* 0x0856: i2c_addr */
-       0x76bb00f8,
+       0x3411f404,
+       0xf40046b0,
+       0x76bbd81b,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0x7e50fc04,
-       0xb60006a1,
+       0xb6000779,
        0x11f40464,
-       0x2ec3e729,
-       0x0134b601,
-       0xbb0553fd,
+       0x0076bb0f,
+       0xf40136b0,
+       0x32f4061b,
+/* 0x0865: i2c_put_byte_done */
+/* 0x0867: i2c_addr */
+       0xbb00f801,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x0007fc7e,
-/* 0x089b: i2c_addr_done */
-       0xf80464b6,
-/* 0x089d: i2c_acquire_addr */
-       0xf8cec700,
-       0xb705e4b6,
-       0xf8d014e0,
-/* 0x08a9: i2c_acquire */
-       0x089d7e00,
-       0x00047e00,
-       0x03d9f000,
-       0x00002e7e,
-/* 0x08ba: i2c_release */
-       0x9d7e00f8,
+       0x0006b27e,
+       0xf40464b6,
+       0xc3e72911,
+       0x34b6012e,
+       0x0553fd01,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x080d7e50,
+       0x0464b600,
+/* 0x08ac: i2c_addr_done */
+/* 0x08ae: i2c_acquire_addr */
+       0xcec700f8,
+       0x05e4b6f8,
+       0xd014e0b7,
+/* 0x08ba: i2c_acquire */
+       0xae7e00f8,
        0x047e0008,
-       0xdaf00000,
+       0xd9f00000,
        0x002e7e03,
-/* 0x08cb: i2c_recv */
-       0xf400f800,
-       0xc1c70132,
-       0x0214b6f8,
-       0xf52816b0,
-       0xb801371f,
-       0x000be813,
-       0xb8003298,
-       0x000bc013,
-       0xf4003198,
-       0xd0f90231,
-       0xd0f9e0f9,
-       0x000067f1,
-       0x100063f1,
-       0xbb016792,
+/* 0x08cb: i2c_release */
+       0x7e00f800,
+       0x7e0008ae,
+       0xf0000004,
+       0x2e7e03da,
+       0x00f80000,
+/* 0x08dc: i2c_recv */
+       0xc70132f4,
+       0x14b6f8c1,
+       0x2816b002,
+       0x01371ff5,
+       0x0cf413b8,
+       0x00329800,
+       0x0ccc13b8,
+       0x00319800,
+       0xf90231f4,
+       0xf9e0f9d0,
+       0x0067f1d0,
+       0x0063f100,
+       0x01679210,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x08ba7e50,
+       0x0464b600,
+       0xd6b0d0fc,
+       0xb01bf500,
+       0xbb000500,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x0008a97e,
-       0xfc0464b6,
-       0x00d6b0d0,
-       0x00b01bf5,
-       0x76bb0005,
+       0x0008677e,
+       0xf50464b6,
+       0xc700cc11,
+       0x76bbe0c5,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0x7e50fc04,
-       0xb6000856,
+       0xb600080d,
        0x11f50464,
-       0xc5c700cc,
-       0x0076bbe0,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0xfc7e50fc,
-       0x64b60007,
-       0xa911f504,
-       0xbb010500,
-       0x65b60076,
-       0x9450f904,
-       0x56bb0465,
-       0xfd50bd02,
-       0x50fc0475,
-       0x0008567e,
-       0xf50464b6,
-       0xbb008711,
-       0x65b60076,
-       0x9450f904,
-       0x56bb0465,
-       0xfd50bd02,
-       0x50fc0475,
-       0x0007ad7e,
-       0xf40464b6,
-       0x5bcb6711,
-       0x0076bbe0,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0xf97e50fc,
-       0x64b60006,
-       0xbd5bb204,
-       0x410ef474,
-/* 0x09d0: i2c_recv_not_rd08 */
-       0xf401d6b0,
-       0x00053b1b,
-       0x0008567e,
-       0xc73211f4,
-       0xfc7ee0c5,
-       0x11f40007,
-       0x7e000528,
-       0xf4000856,
-       0xb5c71f11,
-       0x07fc7ee0,
-       0x1511f400,
-       0x0006f97e,
-       0xc5c774bd,
-       0x091bf408,
-       0xf40232f4,
-/* 0x0a0e: i2c_recv_not_wr08 */
-/* 0x0a0e: i2c_recv_done */
-       0xcec7030e,
-       0x08ba7ef8,
-       0xfce0fc00,
-       0x0912f4d0,
-       0xc27e7cb2,
-/* 0x0a22: i2c_recv_exit */
-       0x00f80002,
-/* 0x0a24: i2c_init */
-/* 0x0a26: test_recv */
-       0x584100f8,
-       0x0011cf04,
-       0x400110b6,
-       0x01f60458,
-       0xf104bd00,
-       0xf1d900e7,
-       0x7e134fe3,
-       0xf8000201,
-/* 0x0a45: test_init */
-       0x08004e00,
-       0x0002017e,
-/* 0x0a4e: idle_recv */
-       0x00f800f8,
-/* 0x0a50: idle */
-       0x410031f4,
-       0x11cf0454,
+       0x010500a9,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x08677e50,
+       0x0464b600,
+       0x008711f5,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x07be7e50,
+       0x0464b600,
+       0xcb6711f4,
+       0x76bbe05b,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb600070a,
+       0x5bb20464,
+       0x0ef474bd,
+/* 0x09e1: i2c_recv_not_rd08 */
+       0x01d6b041,
+       0x053b1bf4,
+       0x08677e00,
+       0x3211f400,
+       0x7ee0c5c7,
+       0xf400080d,
+       0x00052811,
+       0x0008677e,
+       0xc71f11f4,
+       0x0d7ee0b5,
+       0x11f40008,
+       0x070a7e15,
+       0xc774bd00,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x0a1f: i2c_recv_not_wr08 */
+/* 0x0a1f: i2c_recv_done */
+       0xc7030ef4,
+       0xcb7ef8ce,
+       0xe0fc0008,
+       0x12f4d0fc,
+       0x7e7cb209,
+/* 0x0a33: i2c_recv_exit */
+       0xf80002c2,
+/* 0x0a35: i2c_init */
+/* 0x0a37: test_recv */
+       0x4100f800,
+       0x11cf0458,
        0x0110b600,
-       0xf6045440,
+       0xf6045840,
        0x04bd0001,
-/* 0x0a64: idle_loop */
-       0x32f45801,
-/* 0x0a69: idle_proc */
-/* 0x0a69: idle_proc_exec */
-       0xb210f902,
-       0x02cb7e1e,
-       0xf410fc00,
-       0x31f40911,
-       0xf00ef402,
-/* 0x0a7c: idle_proc_next */
-       0xa65810b6,
-       0xe81bf41f,
-       0xf4e002f4,
-       0x0ef40028,
-       0x000000c6,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0xd900e7f1,
+       0x134fe3f1,
+       0x0002017e,
+/* 0x0a56: test_init */
+       0x004e00f8,
+       0x02017e08,
+/* 0x0a5f: idle_recv */
+       0xf800f800,
+/* 0x0a61: idle */
+       0x0031f400,
+       0xcf045441,
+       0x10b60011,
+       0x04544001,
+       0xbd0001f6,
+/* 0x0a75: idle_loop */
+       0xf4580104,
+/* 0x0a7a: idle_proc */
+/* 0x0a7a: idle_proc_exec */
+       0x10f90232,
+       0xcb7e1eb2,
+       0x10fc0002,
+       0xf40911f4,
+       0x0ef40231,
+/* 0x0a8d: idle_proc_next */
+       0x5810b6f0,
+       0x1bf41fa6,
+       0xe002f4e8,
+       0xf40028f4,
+       0x0000c60e,
        0x00000000,
        0x00000000,
        0x00000000,
index 64e97baabc3c4d71850641df7a3652d94703bbb4..d1f9b6cb66d7b542979c6f4ad2643be209e7ebe3 100644 (file)
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x584d454d,
-       0x000006e0,
-       0x000006d2,
+       0x00000842,
+       0x00000834,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x46524550,
-       0x000006e4,
-       0x000006e2,
+       0x00000846,
+       0x00000844,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x5f433249,
-       0x00000b14,
-       0x000009b7,
+       0x00000c76,
+       0x00000b19,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x54534554,
-       0x00000b3d,
-       0x00000b16,
+       0x00000c9f,
+       0x00000c78,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000b49,
-       0x00000b47,
+       0x00000cab,
+       0x00000ca9,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -246,13 +246,15 @@ uint32_t nva3_pwr_data[] = {
        0x00010006,
        0x00000000,
        0x000005f8,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+       0x00000007,
        0x00000000,
-/* 0x03bc: memx_ts_end */
+       0x0000067e,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
        0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
        0x00000000,
+/* 0x03cc: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -764,8 +766,75 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+       0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
        0x00001000,
        0x00004000,
        0x00010000,
@@ -776,7 +845,7 @@ uint32_t nva3_pwr_data[] = {
        0x01000000,
        0x04000000,
        0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
        0x00002000,
        0x00008000,
        0x00020000,
@@ -787,7 +856,7 @@ uint32_t nva3_pwr_data[] = {
        0x02000000,
        0x08000000,
        0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
        0x0000e138,
        0x0000e150,
        0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
 
 uint32_t nva3_pwr_code[] = {
@@ -1258,11 +1324,11 @@ uint32_t nva3_pwr_code[] = {
        0x67f0f30b,
        0x0664b62c,
        0x800066cf,
-       0x00f8ee06,
+       0x00f8f106,
 /* 0x05a8: memx_func_leave */
        0xb62c67f0,
        0x66cf0664,
-       0xef068000,
+       0xf2068000,
        0xf10467f0,
        0xb607e407,
        0x06d00604,
@@ -1323,408 +1389,479 @@ uint32_t nva3_pwr_code[] = {
        0x9800f8a4,
        0x10b6001e,
        0x7f21f404,
-/* 0x067e: memx_exec */
-       0xe0f900f8,
-       0xc1b9d0f9,
-       0x02b2b902,
-/* 0x0688: memx_exec_next */
-       0xb6001398,
-       0x34e70410,
-       0x33e701f0,
-       0x32b601e0,
-       0x0c30f001,
-       0xf9de3598,
-       0x0612b855,
-       0x98e41ef4,
-       0x0c98ee0b,
-       0x02cbbbef,
-       0x07c4b7f1,
-       0xcf06b4b6,
-       0xd0fc00bb,
-       0x21f5e0fc,
+/* 0x067e: memx_func_train */
+       0x57f100f8,
+       0x77f10003,
+       0x97f10000,
+       0x93f00000,
+       0x029eb970,
+       0xb90421f4,
+       0xe7f102d8,
+       0x21f42710,
+/* 0x069d: memx_func_train_loop_outer */
+       0x0158e07f,
+       0x0083f101,
+       0xe097f102,
+       0x1193f011,
+       0x80f990f9,
+       0xe0fcd0fc,
+       0xf93f21f4,
+       0x0067f150,
+/* 0x06bd: memx_func_train_loop_inner */
+       0x1187f100,
+       0x9068ff11,
+       0xfd109894,
+       0x97f10589,
+       0x93f00720,
+       0xf990f910,
+       0xfcd0fc80,
+       0x3f21f4e0,
+       0x008097f1,
+       0xb91093f0,
+       0x21f4029e,
+       0x02d8b904,
+       0xf92088c5,
+       0xfc80f990,
+       0xf4e0fcd0,
+       0x97f13f21,
+       0x93f0053c,
+       0x0287f110,
+       0x0083f130,
+       0xf990f980,
+       0xfcd0fc80,
+       0x3f21f4e0,
+       0x0560e7f1,
+       0xf110e3f0,
+       0xf10000d7,
+       0x908000d3,
+       0xb7f100dc,
+       0xb3f08480,
+       0xa421f41e,
+       0x000057f1,
+       0xffff97f1,
+       0x830093f1,
+/* 0x073c: memx_func_train_loop_4x */
+       0x0080a7f1,
+       0xb910a3f0,
+       0x21f402ae,
+       0x02d8b904,
+       0xffdfb7f1,
+       0xffffb3f1,
+       0xf9048bfd,
+       0xfc80f9a0,
+       0xf4e0fcd0,
+       0xa7f13f21,
+       0xa3f0053c,
+       0x0287f110,
+       0x0083f130,
+       0xf9a0f980,
+       0xfcd0fc80,
+       0x3f21f4e0,
+       0x0560e7f1,
+       0xf110e3f0,
+       0xf10000d7,
+       0xb98000d3,
+       0xb7f102dc,
+       0xb3f02710,
+       0xa421f400,
+       0xf402eeb9,
+       0xddb90421,
+       0x949dff02,
+       0x700150b6,
+       0x1ef40456,
+       0xcc7aa092,
+       0x00a9800b,
+       0xb60160b6,
+       0x66700470,
+       0x001ef510,
+       0xb650fcff,
+       0x56700150,
+       0xd41ef507,
+/* 0x07cf: memx_exec */
+       0xf900f8fe,
+       0xb9d0f9e0,
+       0xb2b902c1,
+/* 0x07d9: memx_exec_next */
+       0x00139802,
+       0xe70410b6,
+       0xe701f034,
+       0xb601e033,
+       0x30f00132,
+       0xde35980c,
+       0x12b855f9,
+       0xe41ef406,
+       0x98f10b98,
+       0xcbbbf20c,
+       0xc4b7f102,
+       0x06b4b607,
+       0xfc00bbcf,
+       0xf5e0fcd0,
+       0xf8034221,
+/* 0x0815: memx_info */
+       0x01c67000,
+/* 0x081b: memx_info_data */
+       0xf10e0bf4,
+       0xf103ccc7,
+       0xf40800b7,
+/* 0x0826: memx_info_train */
+       0xc7f10b0e,
+       0xb7f10bcc,
+/* 0x082e: memx_info_send */
+       0x21f50100,
        0x00f80342,
-/* 0x06c4: memx_info */
-       0x03c0c7f1,
-       0x0800b7f1,
-       0x034221f5,
-/* 0x06d2: memx_recv */
-       0xd6b000f8,
-       0xa90bf401,
-       0xf400d6b0,
-       0x00f8e90b,
-/* 0x06e0: memx_init */
-/* 0x06e2: perf_recv */
+/* 0x0834: memx_recv */
+       0xf401d6b0,
+       0xd6b0980b,
+       0xd80bf400,
+/* 0x0842: memx_init */
+       0x00f800f8,
+/* 0x0844: perf_recv */
+/* 0x0846: perf_init */
        0x00f800f8,
-/* 0x06e4: perf_init */
-/* 0x06e6: i2c_drive_scl */
+/* 0x0848: i2c_drive_scl */
+       0xf40036b0,
+       0x07f1110b,
+       0x04b607e0,
+       0x0001d006,
+       0x00f804bd,
+/* 0x085c: i2c_drive_scl_lo */
+       0x07e407f1,
+       0xd00604b6,
+       0x04bd0001,
+/* 0x086a: i2c_drive_sda */
        0x36b000f8,
        0x110bf400,
        0x07e007f1,
        0xd00604b6,
-       0x04bd0001,
-/* 0x06fa: i2c_drive_scl_lo */
+       0x04bd0002,
+/* 0x087e: i2c_drive_sda_lo */
        0x07f100f8,
        0x04b607e4,
-       0x0001d006,
-       0x00f804bd,
-/* 0x0708: i2c_drive_sda */
-       0xf40036b0,
-       0x07f1110b,
-       0x04b607e0,
        0x0002d006,
        0x00f804bd,
-/* 0x071c: i2c_drive_sda_lo */
-       0x07e407f1,
-       0xd00604b6,
-       0x04bd0002,
-/* 0x072a: i2c_sense_scl */
-       0x32f400f8,
-       0xc437f101,
-       0x0634b607,
-       0xfd0033cf,
-       0x0bf40431,
-       0x0131f406,
-/* 0x0740: i2c_sense_scl_done */
-/* 0x0742: i2c_sense_sda */
-       0x32f400f8,
-       0xc437f101,
-       0x0634b607,
-       0xfd0033cf,
-       0x0bf40432,
-       0x0131f406,
-/* 0x0758: i2c_sense_sda_done */
-/* 0x075a: i2c_raise_scl */
-       0x40f900f8,
-       0x089847f1,
-       0xf50137f0,
-/* 0x0767: i2c_raise_scl_wait */
-       0xf106e621,
-       0xf403e8e7,
-       0x21f57f21,
-       0x01f4072a,
-       0x0142b609,
-/* 0x077b: i2c_raise_scl_done */
-       0xfcef1bf4,
-/* 0x077f: i2c_start */
-       0xf500f840,
-       0xf4072a21,
-       0x21f50d11,
-       0x11f40742,
-       0x300ef406,
-/* 0x0790: i2c_start_rep */
-       0xf50037f0,
-       0xf006e621,
-       0x21f50137,
-       0x76bb0708,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6075a21,
-       0x11f40464,
-/* 0x07bd: i2c_start_send */
-       0x0037f01f,
-       0x070821f5,
-       0x1388e7f1,
-       0xf07f21f4,
-       0x21f50037,
-       0xe7f106e6,
-       0x21f41388,
-/* 0x07d9: i2c_start_out */
-/* 0x07db: i2c_stop */
-       0xf000f87f,
-       0x21f50037,
-       0x37f006e6,
-       0x0821f500,
-       0xe8e7f107,
+/* 0x088c: i2c_sense_scl */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0431fd00,
+       0xf4060bf4,
+/* 0x08a2: i2c_sense_scl_done */
+       0x00f80131,
+/* 0x08a4: i2c_sense_sda */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0432fd00,
+       0xf4060bf4,
+/* 0x08ba: i2c_sense_sda_done */
+       0x00f80131,
+/* 0x08bc: i2c_raise_scl */
+       0x47f140f9,
+       0x37f00898,
+       0x4821f501,
+/* 0x08c9: i2c_raise_scl_wait */
+       0xe8e7f108,
        0x7f21f403,
-       0xf50137f0,
-       0xf106e621,
-       0xf41388e7,
-       0x37f07f21,
-       0x0821f501,
-       0x88e7f107,
-       0x7f21f413,
-/* 0x080e: i2c_bitw */
-       0x21f500f8,
-       0xe7f10708,
-       0x21f403e8,
-       0x0076bb7f,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x21f550fc,
-       0x64b6075a,
-       0x1811f404,
-       0x1388e7f1,
-       0xf07f21f4,
+       0x088c21f5,
+       0xb60901f4,
+       0x1bf40142,
+/* 0x08dd: i2c_raise_scl_done */
+       0xf840fcef,
+/* 0x08e1: i2c_start */
+       0x8c21f500,
+       0x0d11f408,
+       0x08a421f5,
+       0xf40611f4,
+/* 0x08f2: i2c_start_rep */
+       0x37f0300e,
+       0x4821f500,
+       0x0137f008,
+       0x086a21f5,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xbc21f550,
+       0x0464b608,
+/* 0x091f: i2c_start_send */
+       0xf01f11f4,
        0x21f50037,
-       0xe7f106e6,
+       0xe7f1086a,
        0x21f41388,
-/* 0x084d: i2c_bitw_out */
-/* 0x084f: i2c_bitr */
-       0xf000f87f,
-       0x21f50137,
-       0xe7f10708,
-       0x21f403e8,
-       0x0076bb7f,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x21f550fc,
-       0x64b6075a,
-       0x1b11f404,
-       0x074221f5,
+       0x0037f07f,
+       0x084821f5,
+       0x1388e7f1,
+/* 0x093b: i2c_start_out */
+       0xf87f21f4,
+/* 0x093d: i2c_stop */
+       0x0037f000,
+       0x084821f5,
        0xf50037f0,
-       0xf106e621,
+       0xf1086a21,
+       0xf403e8e7,
+       0x37f07f21,
+       0x4821f501,
+       0x88e7f108,
+       0x7f21f413,
+       0xf50137f0,
+       0xf1086a21,
        0xf41388e7,
-       0x3cf07f21,
-       0x0131f401,
-/* 0x0894: i2c_bitr_done */
-/* 0x0896: i2c_get_byte */
-       0x57f000f8,
-       0x0847f000,
-/* 0x089c: i2c_get_byte_next */
-       0xbb0154b6,
+       0x00f87f21,
+/* 0x0970: i2c_bitw */
+       0x086a21f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x084f21f5,
+       0x08bc21f5,
        0xf40464b6,
-       0x53fd2b11,
-       0x0142b605,
-       0xf0d81bf4,
-       0x76bb0137,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6080e21,
-/* 0x08e6: i2c_get_byte_done */
-       0x00f80464,
-/* 0x08e8: i2c_put_byte */
-/* 0x08eb: i2c_put_byte_next */
-       0xb60847f0,
-       0x54ff0142,
-       0x0076bb38,
+       0xe7f11811,
+       0x21f41388,
+       0x0037f07f,
+       0x084821f5,
+       0x1388e7f1,
+/* 0x09af: i2c_bitw_out */
+       0xf87f21f4,
+/* 0x09b1: i2c_bitr */
+       0x0137f000,
+       0x086a21f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x08bc21f5,
+       0xf40464b6,
+       0x21f51b11,
+       0x37f008a4,
+       0x4821f500,
+       0x88e7f108,
+       0x7f21f413,
+       0xf4013cf0,
+/* 0x09f6: i2c_bitr_done */
+       0x00f80131,
+/* 0x09f8: i2c_get_byte */
+       0xf00057f0,
+/* 0x09fe: i2c_get_byte_next */
+       0x54b60847,
+       0x0076bb01,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b6080e,
-       0x3411f404,
-       0xf40046b0,
-       0x76bbd81b,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6084f21,
-       0x11f40464,
-       0x0076bb0f,
-       0xf40136b0,
-       0x32f4061b,
-/* 0x0941: i2c_put_byte_done */
-/* 0x0943: i2c_addr */
-       0xbb00f801,
+       0x64b609b1,
+       0x2b11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0x0137f0d8,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x7021f550,
+       0x0464b609,
+/* 0x0a48: i2c_get_byte_done */
+/* 0x0a4a: i2c_put_byte */
+       0x47f000f8,
+/* 0x0a4d: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x077f21f5,
+       0x097021f5,
        0xf40464b6,
-       0xc3e72911,
-       0x34b6012e,
-       0x0553fd01,
+       0x46b03411,
+       0xd81bf400,
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0xe821f550,
-       0x0464b608,
-/* 0x0988: i2c_addr_done */
-/* 0x098a: i2c_acquire_addr */
-       0xcec700f8,
-       0x02e4b6f8,
-       0x0c10e0b7,
-       0xf800ee98,
-/* 0x0999: i2c_acquire */
-       0x8a21f500,
-       0x0421f409,
-       0xf403d9f0,
-       0x00f83f21,
-/* 0x09a8: i2c_release */
-       0x098a21f5,
-       0xf00421f4,
-       0x21f403da,
-/* 0x09b7: i2c_recv */
-       0xf400f83f,
-       0xc1c70132,
-       0x0214b6f8,
-       0xf52816b0,
-       0xa0013a1f,
-       0x980be813,
-       0x13a00032,
-       0x31980bc0,
-       0x0231f400,
-       0xe0f9d0f9,
-       0x67f1d0f9,
-       0x63f10000,
-       0x67921000,
-       0x0076bb01,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x21f550fc,
-       0x64b60999,
-       0xb0d0fc04,
-       0x1bf500d6,
-       0x57f000b3,
+       0xb121f550,
+       0x0464b609,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x0aa3: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x0aa5: i2c_addr */
        0x0076bb00,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b60943,
-       0xd011f504,
-       0xe0c5c700,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0xe821f550,
-       0x0464b608,
-       0x00ad11f5,
-       0xbb0157f0,
+       0x64b608e1,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb60a4a21,
+/* 0x0aea: i2c_addr_done */
+       0x00f80464,
+/* 0x0aec: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b702e4,
+       0xee980d1c,
+/* 0x0afb: i2c_acquire */
+       0xf500f800,
+       0xf40aec21,
+       0xd9f00421,
+       0x3f21f403,
+/* 0x0b0a: i2c_release */
+       0x21f500f8,
+       0x21f40aec,
+       0x03daf004,
+       0xf83f21f4,
+/* 0x0b19: i2c_recv */
+       0x0132f400,
+       0xb6f8c1c7,
+       0x16b00214,
+       0x3a1ff528,
+       0xf413a001,
+       0x0032980c,
+       0x0ccc13a0,
+       0xf4003198,
+       0xd0f90231,
+       0xd0f9e0f9,
+       0x000067f1,
+       0x100063f1,
+       0xbb016792,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x094321f5,
-       0xf50464b6,
-       0xbb008a11,
+       0x0afb21f5,
+       0xfc0464b6,
+       0x00d6b0d0,
+       0x00b31bf5,
+       0xbb0057f0,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x089621f5,
-       0xf40464b6,
-       0x5bcb6a11,
-       0x0076bbe0,
+       0x0aa521f5,
+       0xf50464b6,
+       0xc700d011,
+       0x76bbe0c5,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb60a4a21,
+       0x11f50464,
+       0x57f000ad,
+       0x0076bb01,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b607db,
-       0x025bb904,
-       0x0ef474bd,
-/* 0x0abd: i2c_recv_not_rd08 */
-       0x01d6b043,
-       0xf03d1bf4,
-       0x21f50057,
-       0x11f40943,
-       0xe0c5c733,
-       0x08e821f5,
-       0xf02911f4,
-       0x21f50057,
-       0x11f40943,
-       0xe0b5c71f,
-       0x08e821f5,
-       0xf51511f4,
-       0xbd07db21,
-       0x08c5c774,
-       0xf4091bf4,
-       0x0ef40232,
-/* 0x0afd: i2c_recv_not_wr08 */
-/* 0x0afd: i2c_recv_done */
-       0xf8cec703,
-       0x09a821f5,
-       0xd0fce0fc,
-       0xb90a12f4,
-       0x21f5027c,
-/* 0x0b12: i2c_recv_exit */
-       0x00f80342,
-/* 0x0b14: i2c_init */
-/* 0x0b16: test_recv */
-       0x17f100f8,
-       0x14b605d8,
-       0x0011cf06,
-       0xf10110b6,
-       0xb605d807,
-       0x01d00604,
-       0xf104bd00,
-       0xf1d900e7,
-       0xf5134fe3,
-       0xf8026221,
-/* 0x0b3d: test_init */
-       0x00e7f100,
-       0x6221f508,
-/* 0x0b47: idle_recv */
-       0xf800f802,
-/* 0x0b49: idle */
-       0x0031f400,
-       0x05d417f1,
+       0x64b60aa5,
+       0x8a11f504,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b609f8,
+       0x6a11f404,
+       0xbbe05bcb,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x093d21f5,
+       0xb90464b6,
+       0x74bd025b,
+/* 0x0c1f: i2c_recv_not_rd08 */
+       0xb0430ef4,
+       0x1bf401d6,
+       0x0057f03d,
+       0x0aa521f5,
+       0xc73311f4,
+       0x21f5e0c5,
+       0x11f40a4a,
+       0x0057f029,
+       0x0aa521f5,
+       0xc71f11f4,
+       0x21f5e0b5,
+       0x11f40a4a,
+       0x3d21f515,
+       0xc774bd09,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x0c5f: i2c_recv_not_wr08 */
+/* 0x0c5f: i2c_recv_done */
+       0xc7030ef4,
+       0x21f5f8ce,
+       0xe0fc0b0a,
+       0x12f4d0fc,
+       0x027cb90a,
+       0x034221f5,
+/* 0x0c74: i2c_recv_exit */
+/* 0x0c76: i2c_init */
+       0x00f800f8,
+/* 0x0c78: test_recv */
+       0x05d817f1,
        0xcf0614b6,
        0x10b60011,
-       0xd407f101,
+       0xd807f101,
        0x0604b605,
        0xbd0001d0,
-/* 0x0b65: idle_loop */
-       0x5817f004,
-/* 0x0b6b: idle_proc */
-/* 0x0b6b: idle_proc_exec */
-       0xf90232f4,
-       0x021eb910,
-       0x034b21f5,
-       0x11f410fc,
-       0x0231f409,
-/* 0x0b7f: idle_proc_next */
-       0xb6ef0ef4,
-       0x1fb85810,
-       0xe61bf406,
-       0xf4dd02f4,
-       0x0ef40028,
-       0x000000bb,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x00e7f104,
+       0x4fe3f1d9,
+       0x6221f513,
+/* 0x0c9f: test_init */
+       0xf100f802,
+       0xf50800e7,
+       0xf8026221,
+/* 0x0ca9: idle_recv */
+/* 0x0cab: idle */
+       0xf400f800,
+       0x17f10031,
+       0x14b605d4,
+       0x0011cf06,
+       0xf10110b6,
+       0xb605d407,
+       0x01d00604,
+/* 0x0cc7: idle_loop */
+       0xf004bd00,
+       0x32f45817,
+/* 0x0ccd: idle_proc */
+/* 0x0ccd: idle_proc_exec */
+       0xb910f902,
+       0x21f5021e,
+       0x10fc034b,
+       0xf40911f4,
+       0x0ef40231,
+/* 0x0ce1: idle_proc_next */
+       0x5810b6ef,
+       0xf4061fb8,
+       0x02f4e61b,
+       0x0028f4dd,
+       0x00bb0ef4,
        0x00000000,
        0x00000000,
        0x00000000,
index ca30fa4011b55907bb1f5d7b207b3291689f55e2..90221d973f84874f72d5f56a60bb06748844feed 100644 (file)
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x584d454d,
-       0x0000074b,
-       0x0000073d,
+       0x0000075e,
+       0x00000750,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x46524550,
-       0x0000074f,
-       0x0000074d,
+       0x00000762,
+       0x00000760,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x5f433249,
-       0x00000b7f,
-       0x00000a22,
+       0x00000b92,
+       0x00000a35,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x54534554,
-       0x00000ba8,
-       0x00000b81,
+       0x00000bbb,
+       0x00000b94,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000bb4,
-       0x00000bb2,
+       0x00000bc7,
+       0x00000bc5,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvc0_pwr_data[] = {
        0x00010006,
        0x00000000,
        0x00000663,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+       0x00000007,
        0x00000000,
-/* 0x03bc: memx_ts_end */
+       0x000006e9,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
        0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
        0x00000000,
+/* 0x03cc: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+       0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
        0x00001000,
        0x00004000,
        0x00010000,
@@ -776,7 +845,7 @@ uint32_t nvc0_pwr_data[] = {
        0x01000000,
        0x04000000,
        0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
        0x00002000,
        0x00008000,
        0x00020000,
@@ -787,7 +856,7 @@ uint32_t nvc0_pwr_data[] = {
        0x02000000,
        0x08000000,
        0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
        0x0000e138,
        0x0000e150,
        0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
 
 uint32_t nvc0_pwr_code[] = {
@@ -1272,10 +1338,10 @@ uint32_t nvc0_pwr_code[] = {
        0xcf0664b6,
        0x06800066,
 /* 0x05db: memx_func_leave */
-       0xf000f8ee,
+       0xf000f8f1,
        0x64b62c67,
        0x0066cf06,
-       0xf0ef0680,
+       0xf0f20680,
        0x07f10467,
        0x04b607e4,
        0x0006d006,
@@ -1350,382 +1416,450 @@ uint32_t nvc0_pwr_code[] = {
        0x1e9800f8,
        0x0410b600,
        0xf87f21f4,
-/* 0x06e9: memx_exec */
-       0xf9e0f900,
-       0x02c1b9d0,
-/* 0x06f3: memx_exec_next */
-       0x9802b2b9,
-       0x10b60013,
-       0xf034e704,
-       0xe033e701,
-       0x0132b601,
-       0x980c30f0,
-       0x55f9de35,
-       0xf40612b8,
-       0x0b98e41e,
-       0xef0c98ee,
-       0xf102cbbb,
-       0xb607c4b7,
-       0xbbcf06b4,
-       0xfcd0fc00,
-       0x4221f5e0,
-/* 0x072f: memx_info */
-       0xf100f803,
-       0xf103c0c7,
-       0xf50800b7,
+/* 0x06e9: memx_func_train */
+/* 0x06eb: memx_exec */
+       0xf900f800,
+       0xb9d0f9e0,
+       0xb2b902c1,
+/* 0x06f5: memx_exec_next */
+       0x00139802,
+       0xe70410b6,
+       0xe701f034,
+       0xb601e033,
+       0x30f00132,
+       0xde35980c,
+       0x12b855f9,
+       0xe41ef406,
+       0x98f10b98,
+       0xcbbbf20c,
+       0xc4b7f102,
+       0x06b4b607,
+       0xfc00bbcf,
+       0xf5e0fcd0,
        0xf8034221,
-/* 0x073d: memx_recv */
-       0x01d6b000,
-       0xb0a90bf4,
-       0x0bf400d6,
-/* 0x074b: memx_init */
-       0xf800f8e9,
-/* 0x074d: perf_recv */
-/* 0x074f: perf_init */
-       0xf800f800,
-/* 0x0751: i2c_drive_scl */
-       0x0036b000,
-       0xf1110bf4,
-       0xb607e007,
-       0x01d00604,
-       0xf804bd00,
-/* 0x0765: i2c_drive_scl_lo */
-       0xe407f100,
-       0x0604b607,
-       0xbd0001d0,
-/* 0x0773: i2c_drive_sda */
-       0xb000f804,
-       0x0bf40036,
-       0xe007f111,
-       0x0604b607,
-       0xbd0002d0,
-/* 0x0787: i2c_drive_sda_lo */
-       0xf100f804,
-       0xb607e407,
-       0x02d00604,
-       0xf804bd00,
-/* 0x0795: i2c_sense_scl */
-       0x0132f400,
-       0x07c437f1,
-       0xcf0634b6,
-       0x31fd0033,
-       0x060bf404,
-/* 0x07ab: i2c_sense_scl_done */
-       0xf80131f4,
-/* 0x07ad: i2c_sense_sda */
-       0x0132f400,
-       0x07c437f1,
-       0xcf0634b6,
-       0x32fd0033,
-       0x060bf404,
-/* 0x07c3: i2c_sense_sda_done */
-       0xf80131f4,
-/* 0x07c5: i2c_raise_scl */
-       0xf140f900,
-       0xf0089847,
-       0x21f50137,
-/* 0x07d2: i2c_raise_scl_wait */
-       0xe7f10751,
-       0x21f403e8,
-       0x9521f57f,
-       0x0901f407,
-       0xf40142b6,
-/* 0x07e6: i2c_raise_scl_done */
-       0x40fcef1b,
-/* 0x07ea: i2c_start */
-       0x21f500f8,
-       0x11f40795,
-       0xad21f50d,
-       0x0611f407,
-/* 0x07fb: i2c_start_rep */
-       0xf0300ef4,
-       0x21f50037,
-       0x37f00751,
-       0x7321f501,
-       0x0076bb07,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x21f550fc,
-       0x64b607c5,
-       0x1f11f404,
-/* 0x0828: i2c_start_send */
-       0xf50037f0,
-       0xf1077321,
-       0xf41388e7,
-       0x37f07f21,
-       0x5121f500,
-       0x88e7f107,
-       0x7f21f413,
-/* 0x0844: i2c_start_out */
-/* 0x0846: i2c_stop */
-       0x37f000f8,
-       0x5121f500,
-       0x0037f007,
-       0x077321f5,
-       0x03e8e7f1,
-       0xf07f21f4,
-       0x21f50137,
-       0xe7f10751,
-       0x21f41388,
-       0x0137f07f,
-       0x077321f5,
-       0x1388e7f1,
-       0xf87f21f4,
-/* 0x0879: i2c_bitw */
-       0x7321f500,
+/* 0x0731: memx_info */
+       0x01c67000,
+/* 0x0737: memx_info_data */
+       0xf10e0bf4,
+       0xf103ccc7,
+       0xf40800b7,
+/* 0x0742: memx_info_train */
+       0xc7f10b0e,
+       0xb7f10bcc,
+/* 0x074a: memx_info_send */
+       0x21f50100,
+       0x00f80342,
+/* 0x0750: memx_recv */
+       0xf401d6b0,
+       0xd6b0980b,
+       0xd80bf400,
+/* 0x075e: memx_init */
+       0x00f800f8,
+/* 0x0760: perf_recv */
+/* 0x0762: perf_init */
+       0x00f800f8,
+/* 0x0764: i2c_drive_scl */
+       0xf40036b0,
+       0x07f1110b,
+       0x04b607e0,
+       0x0001d006,
+       0x00f804bd,
+/* 0x0778: i2c_drive_scl_lo */
+       0x07e407f1,
+       0xd00604b6,
+       0x04bd0001,
+/* 0x0786: i2c_drive_sda */
+       0x36b000f8,
+       0x110bf400,
+       0x07e007f1,
+       0xd00604b6,
+       0x04bd0002,
+/* 0x079a: i2c_drive_sda_lo */
+       0x07f100f8,
+       0x04b607e4,
+       0x0002d006,
+       0x00f804bd,
+/* 0x07a8: i2c_sense_scl */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0431fd00,
+       0xf4060bf4,
+/* 0x07be: i2c_sense_scl_done */
+       0x00f80131,
+/* 0x07c0: i2c_sense_sda */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0432fd00,
+       0xf4060bf4,
+/* 0x07d6: i2c_sense_sda_done */
+       0x00f80131,
+/* 0x07d8: i2c_raise_scl */
+       0x47f140f9,
+       0x37f00898,
+       0x6421f501,
+/* 0x07e5: i2c_raise_scl_wait */
        0xe8e7f107,
        0x7f21f403,
+       0x07a821f5,
+       0xb60901f4,
+       0x1bf40142,
+/* 0x07f9: i2c_raise_scl_done */
+       0xf840fcef,
+/* 0x07fd: i2c_start */
+       0xa821f500,
+       0x0d11f407,
+       0x07c021f5,
+       0xf40611f4,
+/* 0x080e: i2c_start_rep */
+       0x37f0300e,
+       0x6421f500,
+       0x0137f007,
+       0x078621f5,
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0xc521f550,
+       0xd821f550,
        0x0464b607,
-       0xf11811f4,
-       0xf41388e7,
+/* 0x083b: i2c_start_send */
+       0xf01f11f4,
+       0x21f50037,
+       0xe7f10786,
+       0x21f41388,
+       0x0037f07f,
+       0x076421f5,
+       0x1388e7f1,
+/* 0x0857: i2c_start_out */
+       0xf87f21f4,
+/* 0x0859: i2c_stop */
+       0x0037f000,
+       0x076421f5,
+       0xf50037f0,
+       0xf1078621,
+       0xf403e8e7,
        0x37f07f21,
-       0x5121f500,
+       0x6421f501,
        0x88e7f107,
        0x7f21f413,
-/* 0x08b8: i2c_bitw_out */
-/* 0x08ba: i2c_bitr */
-       0x37f000f8,
-       0x7321f501,
-       0xe8e7f107,
-       0x7f21f403,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0xc521f550,
-       0x0464b607,
-       0xf51b11f4,
-       0xf007ad21,
-       0x21f50037,
-       0xe7f10751,
+       0xf50137f0,
+       0xf1078621,
+       0xf41388e7,
+       0x00f87f21,
+/* 0x088c: i2c_bitw */
+       0x078621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x07d821f5,
+       0xf40464b6,
+       0xe7f11811,
        0x21f41388,
-       0x013cf07f,
-/* 0x08ff: i2c_bitr_done */
-       0xf80131f4,
-/* 0x0901: i2c_get_byte */
-       0x0057f000,
-/* 0x0907: i2c_get_byte_next */
-       0xb60847f0,
-       0x76bb0154,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb608ba21,
-       0x11f40464,
-       0x0553fd2b,
-       0xf40142b6,
-       0x37f0d81b,
+       0x0037f07f,
+       0x076421f5,
+       0x1388e7f1,
+/* 0x08cb: i2c_bitw_out */
+       0xf87f21f4,
+/* 0x08cd: i2c_bitr */
+       0x0137f000,
+       0x078621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x07d821f5,
+       0xf40464b6,
+       0x21f51b11,
+       0x37f007c0,
+       0x6421f500,
+       0x88e7f107,
+       0x7f21f413,
+       0xf4013cf0,
+/* 0x0912: i2c_bitr_done */
+       0x00f80131,
+/* 0x0914: i2c_get_byte */
+       0xf00057f0,
+/* 0x091a: i2c_get_byte_next */
+       0x54b60847,
        0x0076bb01,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b60879,
-/* 0x0951: i2c_get_byte_done */
-/* 0x0953: i2c_put_byte */
-       0xf000f804,
-/* 0x0956: i2c_put_byte_next */
-       0x42b60847,
-       0x3854ff01,
+       0x64b608cd,
+       0x2b11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0x0137f0d8,
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0x7921f550,
+       0x8c21f550,
        0x0464b608,
-       0xb03411f4,
-       0x1bf40046,
-       0x0076bbd8,
+/* 0x0964: i2c_get_byte_done */
+/* 0x0966: i2c_put_byte */
+       0x47f000f8,
+/* 0x0969: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x088c21f5,
+       0xf40464b6,
+       0x46b03411,
+       0xd81bf400,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xcd21f550,
+       0x0464b608,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x09bf: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x09c1: i2c_addr */
+       0x0076bb00,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b608ba,
-       0x0f11f404,
-       0xb00076bb,
-       0x1bf40136,
-       0x0132f406,
-/* 0x09ac: i2c_put_byte_done */
-/* 0x09ae: i2c_addr */
-       0x76bb00f8,
+       0x64b607fd,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0xf550fc04,
-       0xb607ea21,
-       0x11f40464,
-       0x2ec3e729,
-       0x0134b601,
-       0xbb0553fd,
+       0xb6096621,
+/* 0x0a06: i2c_addr_done */
+       0x00f80464,
+/* 0x0a08: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b702e4,
+       0xee980d1c,
+/* 0x0a17: i2c_acquire */
+       0xf500f800,
+       0xf40a0821,
+       0xd9f00421,
+       0x3f21f403,
+/* 0x0a26: i2c_release */
+       0x21f500f8,
+       0x21f40a08,
+       0x03daf004,
+       0xf83f21f4,
+/* 0x0a35: i2c_recv */
+       0x0132f400,
+       0xb6f8c1c7,
+       0x16b00214,
+       0x3a1ff528,
+       0xf413a001,
+       0x0032980c,
+       0x0ccc13a0,
+       0xf4003198,
+       0xd0f90231,
+       0xd0f9e0f9,
+       0x000067f1,
+       0x100063f1,
+       0xbb016792,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x095321f5,
-/* 0x09f3: i2c_addr_done */
-       0xf80464b6,
-/* 0x09f5: i2c_acquire_addr */
-       0xf8cec700,
-       0xb702e4b6,
-       0x980c10e0,
-       0x00f800ee,
-/* 0x0a04: i2c_acquire */
-       0x09f521f5,
-       0xf00421f4,
-       0x21f403d9,
-/* 0x0a13: i2c_release */
-       0xf500f83f,
-       0xf409f521,
-       0xdaf00421,
-       0x3f21f403,
-/* 0x0a22: i2c_recv */
-       0x32f400f8,
-       0xf8c1c701,
-       0xb00214b6,
-       0x1ff52816,
-       0x13a0013a,
-       0x32980be8,
-       0xc013a000,
-       0x0031980b,
-       0xf90231f4,
-       0xf9e0f9d0,
-       0x0067f1d0,
-       0x0063f100,
-       0x01679210,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0x0421f550,
-       0x0464b60a,
-       0xd6b0d0fc,
-       0xb31bf500,
-       0x0057f000,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0xae21f550,
-       0x0464b609,
-       0x00d011f5,
-       0xbbe0c5c7,
+       0x0a1721f5,
+       0xfc0464b6,
+       0x00d6b0d0,
+       0x00b31bf5,
+       0xbb0057f0,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x095321f5,
+       0x09c121f5,
        0xf50464b6,
-       0xf000ad11,
-       0x76bb0157,
+       0xc700d011,
+       0x76bbe0c5,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0xf550fc04,
-       0xb609ae21,
+       0xb6096621,
        0x11f50464,
-       0x76bb008a,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6090121,
-       0x11f40464,
-       0xe05bcb6a,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0x4621f550,
-       0x0464b608,
-       0xbd025bb9,
-       0x430ef474,
-/* 0x0b28: i2c_recv_not_rd08 */
-       0xf401d6b0,
-       0x57f03d1b,
-       0xae21f500,
-       0x3311f409,
-       0xf5e0c5c7,
-       0xf4095321,
-       0x57f02911,
-       0xae21f500,
-       0x1f11f409,
-       0xf5e0b5c7,
-       0xf4095321,
-       0x21f51511,
-       0x74bd0846,
-       0xf408c5c7,
-       0x32f4091b,
-       0x030ef402,
-/* 0x0b68: i2c_recv_not_wr08 */
-/* 0x0b68: i2c_recv_done */
-       0xf5f8cec7,
-       0xfc0a1321,
-       0xf4d0fce0,
-       0x7cb90a12,
-       0x4221f502,
-/* 0x0b7d: i2c_recv_exit */
-/* 0x0b7f: i2c_init */
-       0xf800f803,
-/* 0x0b81: test_recv */
-       0xd817f100,
-       0x0614b605,
-       0xb60011cf,
-       0x07f10110,
-       0x04b605d8,
-       0x0001d006,
-       0xe7f104bd,
-       0xe3f1d900,
-       0x21f5134f,
-       0x00f80262,
-/* 0x0ba8: test_init */
-       0x0800e7f1,
-       0x026221f5,
-/* 0x0bb2: idle_recv */
+       0x57f000ad,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b609c1,
+       0x8a11f504,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b60914,
+       0x6a11f404,
+       0xbbe05bcb,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x085921f5,
+       0xb90464b6,
+       0x74bd025b,
+/* 0x0b3b: i2c_recv_not_rd08 */
+       0xb0430ef4,
+       0x1bf401d6,
+       0x0057f03d,
+       0x09c121f5,
+       0xc73311f4,
+       0x21f5e0c5,
+       0x11f40966,
+       0x0057f029,
+       0x09c121f5,
+       0xc71f11f4,
+       0x21f5e0b5,
+       0x11f40966,
+       0x5921f515,
+       0xc774bd08,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x0b7b: i2c_recv_not_wr08 */
+/* 0x0b7b: i2c_recv_done */
+       0xc7030ef4,
+       0x21f5f8ce,
+       0xe0fc0a26,
+       0x12f4d0fc,
+       0x027cb90a,
+       0x034221f5,
+/* 0x0b90: i2c_recv_exit */
+/* 0x0b92: i2c_init */
        0x00f800f8,
-/* 0x0bb4: idle */
-       0xf10031f4,
-       0xb605d417,
-       0x11cf0614,
-       0x0110b600,
-       0x05d407f1,
-       0xd00604b6,
-       0x04bd0001,
-/* 0x0bd0: idle_loop */
-       0xf45817f0,
-/* 0x0bd6: idle_proc */
-/* 0x0bd6: idle_proc_exec */
-       0x10f90232,
-       0xf5021eb9,
-       0xfc034b21,
-       0x0911f410,
-       0xf40231f4,
-/* 0x0bea: idle_proc_next */
-       0x10b6ef0e,
-       0x061fb858,
-       0xf4e61bf4,
-       0x28f4dd02,
-       0xbb0ef400,
+/* 0x0b94: test_recv */
+       0x05d817f1,
+       0xcf0614b6,
+       0x10b60011,
+       0xd807f101,
+       0x0604b605,
+       0xbd0001d0,
+       0x00e7f104,
+       0x4fe3f1d9,
+       0x6221f513,
+/* 0x0bbb: test_init */
+       0xf100f802,
+       0xf50800e7,
+       0xf8026221,
+/* 0x0bc5: idle_recv */
+/* 0x0bc7: idle */
+       0xf400f800,
+       0x17f10031,
+       0x14b605d4,
+       0x0011cf06,
+       0xf10110b6,
+       0xb605d407,
+       0x01d00604,
+/* 0x0be3: idle_loop */
+       0xf004bd00,
+       0x32f45817,
+/* 0x0be9: idle_proc */
+/* 0x0be9: idle_proc_exec */
+       0xb910f902,
+       0x21f5021e,
+       0x10fc034b,
+       0xf40911f4,
+       0x0ef40231,
+/* 0x0bfd: idle_proc_next */
+       0x5810b6ef,
+       0xf4061fb8,
+       0x02f4e61b,
+       0x0028f4dd,
+       0x00bb0ef4,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
 };
index 12d86f72ad105a191f152d38c1f0b4363c856ba1..7e16aab44d855041dc00e974fced69f48a4aba56 100644 (file)
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x584d454d,
-       0x00000678,
-       0x0000066a,
+       0x0000068b,
+       0x0000067d,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x46524550,
-       0x0000067c,
-       0x0000067a,
+       0x0000068f,
+       0x0000068d,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x5f433249,
-       0x00000a97,
-       0x0000093a,
+       0x00000aaa,
+       0x0000094d,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x54534554,
-       0x00000aba,
-       0x00000a99,
+       0x00000acd,
+       0x00000aac,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000ac6,
-       0x00000ac4,
+       0x00000ad9,
+       0x00000ad7,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvd0_pwr_data[] = {
        0x00010006,
        0x00000000,
        0x000005d3,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+       0x00000007,
        0x00000000,
-/* 0x03bc: memx_ts_end */
+       0x00000619,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
        0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
        0x00000000,
+/* 0x03cc: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+       0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
        0x00000400,
        0x00000800,
        0x00001000,
@@ -776,7 +845,7 @@ uint32_t nvd0_pwr_data[] = {
        0x00020000,
        0x00040000,
        0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
        0x00100000,
        0x00200000,
        0x00400000,
@@ -844,9 +913,6 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
 
 uint32_t nvd0_pwr_code[] = {
@@ -1236,11 +1302,11 @@ uint32_t nvd0_pwr_code[] = {
        0x0bf40464,
        0x2c67f0f6,
        0x800066cf,
-       0x00f8ee06,
+       0x00f8f106,
 /* 0x0554: memx_func_leave */
        0xcf2c67f0,
        0x06800066,
-       0x0467f0ef,
+       0x0467f0f2,
        0x07e407f1,
        0xbd0006d0,
 /* 0x0569: memx_func_leave_wait */
@@ -1292,379 +1358,383 @@ uint32_t nvd0_pwr_code[] = {
        0x1e9800f8,
        0x0410b600,
        0xf86721f4,
-/* 0x0619: memx_exec */
-       0xf9e0f900,
-       0x02c1b9d0,
-/* 0x0623: memx_exec_next */
-       0x9802b2b9,
-       0x10b60013,
-       0xf034e704,
-       0xe033e701,
-       0x0132b601,
-       0x980c30f0,
-       0x55f9de35,
-       0xf40612b8,
-       0x0b98e41e,
-       0xef0c98ee,
-       0xf102cbbb,
-       0xcf07c4b7,
-       0xd0fc00bb,
-       0x21f5e0fc,
-       0x00f802f1,
-/* 0x065c: memx_info */
-       0x03c0c7f1,
-       0x0800b7f1,
+/* 0x0619: memx_func_train */
+/* 0x061b: memx_exec */
+       0xf900f800,
+       0xb9d0f9e0,
+       0xb2b902c1,
+/* 0x0625: memx_exec_next */
+       0x00139802,
+       0xe70410b6,
+       0xe701f034,
+       0xb601e033,
+       0x30f00132,
+       0xde35980c,
+       0x12b855f9,
+       0xe41ef406,
+       0x98f10b98,
+       0xcbbbf20c,
+       0xc4b7f102,
+       0x00bbcf07,
+       0xe0fcd0fc,
        0x02f121f5,
-/* 0x066a: memx_recv */
-       0xd6b000f8,
-       0xac0bf401,
-       0xf400d6b0,
-       0x00f8e90b,
-/* 0x0678: memx_init */
-/* 0x067a: perf_recv */
-       0x00f800f8,
-/* 0x067c: perf_init */
-/* 0x067e: i2c_drive_scl */
-       0x36b000f8,
-       0x0e0bf400,
-       0x07e007f1,
-       0xbd0001d0,
-/* 0x068f: i2c_drive_scl_lo */
-       0xf100f804,
-       0xd007e407,
+/* 0x065e: memx_info */
+       0xc67000f8,
+       0x0e0bf401,
+/* 0x0664: memx_info_data */
+       0x03ccc7f1,
+       0x0800b7f1,
+/* 0x066f: memx_info_train */
+       0xf10b0ef4,
+       0xf10bccc7,
+/* 0x0677: memx_info_send */
+       0xf50100b7,
+       0xf802f121,
+/* 0x067d: memx_recv */
+       0x01d6b000,
+       0xb09b0bf4,
+       0x0bf400d6,
+/* 0x068b: memx_init */
+       0xf800f8d8,
+/* 0x068d: perf_recv */
+/* 0x068f: perf_init */
+       0xf800f800,
+/* 0x0691: i2c_drive_scl */
+       0x0036b000,
+       0xf10e0bf4,
+       0xd007e007,
        0x04bd0001,
-/* 0x069a: i2c_drive_sda */
-       0x36b000f8,
-       0x0e0bf400,
-       0x07e007f1,
-       0xbd0002d0,
-/* 0x06ab: i2c_drive_sda_lo */
-       0xf100f804,
-       0xd007e407,
+/* 0x06a2: i2c_drive_scl_lo */
+       0x07f100f8,
+       0x01d007e4,
+       0xf804bd00,
+/* 0x06ad: i2c_drive_sda */
+       0x0036b000,
+       0xf10e0bf4,
+       0xd007e007,
        0x04bd0002,
-/* 0x06b6: i2c_sense_scl */
+/* 0x06be: i2c_drive_sda_lo */
+       0x07f100f8,
+       0x02d007e4,
+       0xf804bd00,
+/* 0x06c9: i2c_sense_scl */
+       0x0132f400,
+       0x07c437f1,
+       0xfd0033cf,
+       0x0bf40431,
+       0x0131f406,
+/* 0x06dc: i2c_sense_scl_done */
+/* 0x06de: i2c_sense_sda */
        0x32f400f8,
        0xc437f101,
        0x0033cf07,
-       0xf40431fd,
+       0xf40432fd,
        0x31f4060b,
-/* 0x06c9: i2c_sense_scl_done */
-/* 0x06cb: i2c_sense_sda */
-       0xf400f801,
-       0x37f10132,
-       0x33cf07c4,
-       0x0432fd00,
-       0xf4060bf4,
-/* 0x06de: i2c_sense_sda_done */
-       0x00f80131,
-/* 0x06e0: i2c_raise_scl */
-       0x47f140f9,
-       0x37f00898,
-       0x7e21f501,
-/* 0x06ed: i2c_raise_scl_wait */
-       0xe8e7f106,
-       0x6721f403,
-       0x06b621f5,
-       0xb60901f4,
-       0x1bf40142,
-/* 0x0701: i2c_raise_scl_done */
-       0xf840fcef,
-/* 0x0705: i2c_start */
-       0xb621f500,
-       0x0d11f406,
-       0x06cb21f5,
-       0xf40611f4,
-/* 0x0716: i2c_start_rep */
-       0x37f0300e,
-       0x7e21f500,
-       0x0137f006,
-       0x069a21f5,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0xe021f550,
-       0x0464b606,
-/* 0x0743: i2c_start_send */
-       0xf01f11f4,
-       0x21f50037,
-       0xe7f1069a,
-       0x21f41388,
-       0x0037f067,
-       0x067e21f5,
-       0x1388e7f1,
-/* 0x075f: i2c_start_out */
-       0xf86721f4,
-/* 0x0761: i2c_stop */
-       0x0037f000,
-       0x067e21f5,
-       0xf50037f0,
-       0xf1069a21,
-       0xf403e8e7,
-       0x37f06721,
-       0x7e21f501,
-       0x88e7f106,
-       0x6721f413,
-       0xf50137f0,
-       0xf1069a21,
-       0xf41388e7,
-       0x00f86721,
-/* 0x0794: i2c_bitw */
-       0x069a21f5,
+/* 0x06f1: i2c_sense_sda_done */
+/* 0x06f3: i2c_raise_scl */
+       0xf900f801,
+       0x9847f140,
+       0x0137f008,
+       0x069121f5,
+/* 0x0700: i2c_raise_scl_wait */
        0x03e8e7f1,
-       0xbb6721f4,
-       0x65b60076,
-       0x9450f904,
-       0x56bb0465,
-       0xfd50bd02,
-       0x50fc0475,
-       0x06e021f5,
-       0xf40464b6,
-       0xe7f11811,
-       0x21f41388,
-       0x0037f067,
-       0x067e21f5,
-       0x1388e7f1,
-/* 0x07d3: i2c_bitw_out */
-       0xf86721f4,
-/* 0x07d5: i2c_bitr */
-       0x0137f000,
-       0x069a21f5,
-       0x03e8e7f1,
-       0xbb6721f4,
+       0xf56721f4,
+       0xf406c921,
+       0x42b60901,
+       0xef1bf401,
+/* 0x0714: i2c_raise_scl_done */
+       0x00f840fc,
+/* 0x0718: i2c_start */
+       0x06c921f5,
+       0xf50d11f4,
+       0xf406de21,
+       0x0ef40611,
+/* 0x0729: i2c_start_rep */
+       0x0037f030,
+       0x069121f5,
+       0xf50137f0,
+       0xbb06ad21,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x06e021f5,
+       0x06f321f5,
        0xf40464b6,
-       0x21f51b11,
-       0x37f006cb,
-       0x7e21f500,
+/* 0x0756: i2c_start_send */
+       0x37f01f11,
+       0xad21f500,
        0x88e7f106,
        0x6721f413,
-       0xf4013cf0,
-/* 0x081a: i2c_bitr_done */
-       0x00f80131,
-/* 0x081c: i2c_get_byte */
-       0xf00057f0,
-/* 0x0822: i2c_get_byte_next */
-       0x54b60847,
-       0x0076bb01,
-       0xf90465b6,
-       0x04659450,
-       0xbd0256bb,
-       0x0475fd50,
-       0x21f550fc,
-       0x64b607d5,
-       0x2b11f404,
-       0xb60553fd,
-       0x1bf40142,
-       0x0137f0d8,
+       0xf50037f0,
+       0xf1069121,
+       0xf41388e7,
+/* 0x0772: i2c_start_out */
+       0x00f86721,
+/* 0x0774: i2c_stop */
+       0xf50037f0,
+       0xf0069121,
+       0x21f50037,
+       0xe7f106ad,
+       0x21f403e8,
+       0x0137f067,
+       0x069121f5,
+       0x1388e7f1,
+       0xf06721f4,
+       0x21f50137,
+       0xe7f106ad,
+       0x21f41388,
+/* 0x07a7: i2c_bitw */
+       0xf500f867,
+       0xf106ad21,
+       0xf403e8e7,
+       0x76bb6721,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb606f321,
+       0x11f40464,
+       0x88e7f118,
+       0x6721f413,
+       0xf50037f0,
+       0xf1069121,
+       0xf41388e7,
+/* 0x07e6: i2c_bitw_out */
+       0x00f86721,
+/* 0x07e8: i2c_bitr */
+       0xf50137f0,
+       0xf106ad21,
+       0xf403e8e7,
+       0x76bb6721,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb606f321,
+       0x11f40464,
+       0xde21f51b,
+       0x0037f006,
+       0x069121f5,
+       0x1388e7f1,
+       0xf06721f4,
+       0x31f4013c,
+/* 0x082d: i2c_bitr_done */
+/* 0x082f: i2c_get_byte */
+       0xf000f801,
+       0x47f00057,
+/* 0x0835: i2c_get_byte_next */
+       0x0154b608,
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0x9421f550,
+       0xe821f550,
        0x0464b607,
-/* 0x086c: i2c_get_byte_done */
-/* 0x086e: i2c_put_byte */
-       0x47f000f8,
-/* 0x0871: i2c_put_byte_next */
-       0x0142b608,
-       0xbb3854ff,
+       0xfd2b11f4,
+       0x42b60553,
+       0xd81bf401,
+       0xbb0137f0,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x07a721f5,
+/* 0x087f: i2c_get_byte_done */
+       0xf80464b6,
+/* 0x0881: i2c_put_byte */
+       0x0847f000,
+/* 0x0884: i2c_put_byte_next */
+       0xff0142b6,
+       0x76bb3854,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb607a721,
+       0x11f40464,
+       0x0046b034,
+       0xbbd81bf4,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x079421f5,
+       0x07e821f5,
        0xf40464b6,
-       0x46b03411,
-       0xd81bf400,
+       0x76bb0f11,
+       0x0136b000,
+       0xf4061bf4,
+/* 0x08da: i2c_put_byte_done */
+       0x00f80132,
+/* 0x08dc: i2c_addr */
        0xb60076bb,
        0x50f90465,
        0xbb046594,
        0x50bd0256,
        0xfc0475fd,
-       0xd521f550,
+       0x1821f550,
        0x0464b607,
-       0xbb0f11f4,
-       0x36b00076,
-       0x061bf401,
-/* 0x08c7: i2c_put_byte_done */
-       0xf80132f4,
-/* 0x08c9: i2c_addr */
-       0x0076bb00,
+       0xe72911f4,
+       0xb6012ec3,
+       0x53fd0134,
+       0x0076bb05,
        0xf90465b6,
        0x04659450,
        0xbd0256bb,
        0x0475fd50,
        0x21f550fc,
-       0x64b60705,
-       0x2911f404,
-       0x012ec3e7,
-       0xfd0134b6,
-       0x76bb0553,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6086e21,
-/* 0x090e: i2c_addr_done */
-       0x00f80464,
-/* 0x0910: i2c_acquire_addr */
-       0xb6f8cec7,
-       0xe0b705e4,
-       0x00f8d014,
-/* 0x091c: i2c_acquire */
-       0x091021f5,
-       0xf00421f4,
-       0x21f403d9,
-/* 0x092b: i2c_release */
-       0xf500f833,
-       0xf4091021,
-       0xdaf00421,
+       0x64b60881,
+/* 0x0921: i2c_addr_done */
+/* 0x0923: i2c_acquire_addr */
+       0xc700f804,
+       0xe4b6f8ce,
+       0x14e0b705,
+/* 0x092f: i2c_acquire */
+       0xf500f8d0,
+       0xf4092321,
+       0xd9f00421,
        0x3321f403,
-/* 0x093a: i2c_recv */
-       0x32f400f8,
-       0xf8c1c701,
-       0xb00214b6,
-       0x1ff52816,
-       0x13a0013a,
-       0x32980be8,
-       0xc013a000,
-       0x0031980b,
-       0xf90231f4,
-       0xf9e0f9d0,
-       0x0067f1d0,
-       0x0063f100,
-       0x01679210,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0x1c21f550,
-       0x0464b609,
-       0xd6b0d0fc,
-       0xb31bf500,
-       0x0057f000,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0xc921f550,
-       0x0464b608,
-       0x00d011f5,
-       0xbbe0c5c7,
+/* 0x093e: i2c_release */
+       0x21f500f8,
+       0x21f40923,
+       0x03daf004,
+       0xf83321f4,
+/* 0x094d: i2c_recv */
+       0x0132f400,
+       0xb6f8c1c7,
+       0x16b00214,
+       0x3a1ff528,
+       0xf413a001,
+       0x0032980c,
+       0x0ccc13a0,
+       0xf4003198,
+       0xd0f90231,
+       0xd0f9e0f9,
+       0x000067f1,
+       0x100063f1,
+       0xbb016792,
        0x65b60076,
        0x9450f904,
        0x56bb0465,
        0xfd50bd02,
        0x50fc0475,
-       0x086e21f5,
+       0x092f21f5,
+       0xfc0464b6,
+       0x00d6b0d0,
+       0x00b31bf5,
+       0xbb0057f0,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x08dc21f5,
        0xf50464b6,
-       0xf000ad11,
-       0x76bb0157,
+       0xc700d011,
+       0x76bbe0c5,
        0x0465b600,
        0x659450f9,
        0x0256bb04,
        0x75fd50bd,
        0xf550fc04,
-       0xb608c921,
+       0xb6088121,
        0x11f50464,
-       0x76bb008a,
-       0x0465b600,
-       0x659450f9,
-       0x0256bb04,
-       0x75fd50bd,
-       0xf550fc04,
-       0xb6081c21,
-       0x11f40464,
-       0xe05bcb6a,
-       0xb60076bb,
-       0x50f90465,
-       0xbb046594,
-       0x50bd0256,
-       0xfc0475fd,
-       0x6121f550,
-       0x0464b607,
-       0xbd025bb9,
-       0x430ef474,
-/* 0x0a40: i2c_recv_not_rd08 */
-       0xf401d6b0,
-       0x57f03d1b,
-       0xc921f500,
-       0x3311f408,
-       0xf5e0c5c7,
-       0xf4086e21,
-       0x57f02911,
-       0xc921f500,
-       0x1f11f408,
-       0xf5e0b5c7,
-       0xf4086e21,
-       0x21f51511,
-       0x74bd0761,
-       0xf408c5c7,
-       0x32f4091b,
-       0x030ef402,
-/* 0x0a80: i2c_recv_not_wr08 */
-/* 0x0a80: i2c_recv_done */
-       0xf5f8cec7,
-       0xfc092b21,
-       0xf4d0fce0,
-       0x7cb90a12,
-       0xf121f502,
-/* 0x0a95: i2c_recv_exit */
-/* 0x0a97: i2c_init */
+       0x57f000ad,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b608dc,
+       0x8a11f504,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b6082f,
+       0x6a11f404,
+       0xbbe05bcb,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x077421f5,
+       0xb90464b6,
+       0x74bd025b,
+/* 0x0a53: i2c_recv_not_rd08 */
+       0xb0430ef4,
+       0x1bf401d6,
+       0x0057f03d,
+       0x08dc21f5,
+       0xc73311f4,
+       0x21f5e0c5,
+       0x11f40881,
+       0x0057f029,
+       0x08dc21f5,
+       0xc71f11f4,
+       0x21f5e0b5,
+       0x11f40881,
+       0x7421f515,
+       0xc774bd07,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x0a93: i2c_recv_not_wr08 */
+/* 0x0a93: i2c_recv_done */
+       0xc7030ef4,
+       0x21f5f8ce,
+       0xe0fc093e,
+       0x12f4d0fc,
+       0x027cb90a,
+       0x02f121f5,
+/* 0x0aa8: i2c_recv_exit */
+/* 0x0aaa: i2c_init */
+       0x00f800f8,
+/* 0x0aac: test_recv */
+       0x05d817f1,
+       0xb60011cf,
+       0x07f10110,
+       0x01d005d8,
+       0xf104bd00,
+       0xf1d900e7,
+       0xf5134fe3,
+       0xf8022321,
+/* 0x0acd: test_init */
+       0x00e7f100,
+       0x2321f508,
+/* 0x0ad7: idle_recv */
        0xf800f802,
-/* 0x0a99: test_recv */
-       0xd817f100,
-       0x0011cf05,
-       0xf10110b6,
-       0xd005d807,
-       0x04bd0001,
-       0xd900e7f1,
-       0x134fe3f1,
-       0x022321f5,
-/* 0x0aba: test_init */
-       0xe7f100f8,
-       0x21f50800,
-       0x00f80223,
-/* 0x0ac4: idle_recv */
-/* 0x0ac6: idle */
-       0x31f400f8,
-       0xd417f100,
-       0x0011cf05,
-       0xf10110b6,
-       0xd005d407,
-       0x04bd0001,
-/* 0x0adc: idle_loop */
-       0xf45817f0,
-/* 0x0ae2: idle_proc */
-/* 0x0ae2: idle_proc_exec */
-       0x10f90232,
-       0xf5021eb9,
-       0xfc02fa21,
-       0x0911f410,
-       0xf40231f4,
-/* 0x0af6: idle_proc_next */
-       0x10b6ef0e,
-       0x061fb858,
-       0xf4e61bf4,
-       0x28f4dd02,
-       0xc10ef400,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+/* 0x0ad9: idle */
+       0x0031f400,
+       0x05d417f1,
+       0xb60011cf,
+       0x07f10110,
+       0x01d005d4,
+/* 0x0aef: idle_loop */
+       0xf004bd00,
+       0x32f45817,
+/* 0x0af5: idle_proc */
+/* 0x0af5: idle_proc_exec */
+       0xb910f902,
+       0x21f5021e,
+       0x10fc02fa,
+       0xf40911f4,
+       0x0ef40231,
+/* 0x0b09: idle_proc_next */
+       0x5810b6ef,
+       0xf4061fb8,
+       0x02f4e61b,
+       0x0028f4dd,
+       0x00c10ef4,
        0x00000000,
        0x00000000,
        0x00000000,
index 522e3079f82428e401030c8d34c7124865ec7a3d..c8b06cb77e7241cd50cec944b49adcc0325dd703 100644 (file)
 #define MEMX_MSG_INFO 0
 #define MEMX_MSG_EXEC 1
 
+/* MEMX: info types */
+#define MEMX_INFO_DATA  0
+#define MEMX_INFO_TRAIN 1
+
 /* MEMX: script opcode definitions */
 #define MEMX_ENTER  1
 #define MEMX_LEAVE  2
@@ -25,6 +29,7 @@
 #define MEMX_WAIT   4
 #define MEMX_DELAY  5
 #define MEMX_VBLANK 6
+#define MEMX_TRAIN  7
 
 /* I2C_: message identifiers */
 #define I2C__MSG_RD08 0
index 65eaa2546cad2ce5abbcbc32eea9ac008c8bfd2f..7a9299d7159f5a4fa8d1494f01e0c7c6a4ae7c6f 100644 (file)
@@ -47,7 +47,8 @@ nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
        u32 reply[2];
        int ret;
 
-       ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+       ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+                                       MEMX_INFO_DATA, 0);
        if (ret)
                return ret;
 
@@ -106,7 +107,7 @@ nouveau_memx_wait(struct nouveau_memx *memx,
 {
        nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
                                addr, mask, data, nsec);
-       memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+       memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
        memx_out(memx); /* fuc can't handle multiple */
 }
 
@@ -151,6 +152,38 @@ nouveau_memx_wait_vblank(struct nouveau_memx *memx)
        memx_out(memx); /* fuc can't handle multiple */
 }
 
+void
+nouveau_memx_train(struct nouveau_memx *memx)
+{
+       nv_debug(memx->ppwr, "   MEM TRAIN\n");
+       memx_cmd(memx, MEMX_TRAIN, 0, NULL);
+}
+
+int
+nouveau_memx_train_result(struct nouveau_pwr *ppwr, u32 *res, int rsize)
+{
+       u32 reply[2], base, size, i;
+       int ret;
+
+       ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+                                       MEMX_INFO_TRAIN, 0);
+       if (ret)
+               return ret;
+
+       base = reply[0];
+       size = reply[1] >> 2;
+       if (size > rsize)
+               return -ENOMEM;
+
+       /* read the packet */
+       nv_wr32(ppwr, 0x10a1c0, 0x02000000 | base);
+
+       for (i = 0; i < size; i++)
+               res[i] = nv_rd32(ppwr, 0x10a1c4);
+
+       return 0;
+}
+
 void
 nouveau_memx_block(struct nouveau_memx *memx)
 {
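
The two helpers above work as a pair during a reclock: nouveau_memx_train() queues the MEMX_TRAIN opcode into the script, and once the PMU has executed it, nouveau_memx_train_result() pulls the results back through the 0x10a1c0/0x10a1c4 FIFO. A minimal, hypothetical caller might look like the sketch below; the 64-word buffer matches the 0x100-byte memx_train_head..memx_train_tail region reserved in the data segments above, and nouveau_memx_fini() is assumed from the existing memx API. This is an illustration, not code from the commit.

	/* Hypothetical caller: queue training in a memx script, run it,
	 * then read the results back from the PMU. */
	static int
	example_run_training(struct nouveau_pwr *ppwr, struct nouveau_memx *memx)
	{
		u32 results[64];	/* 0x100 bytes: size of the memx_train region */
		int ret;

		nouveau_memx_train(memx);		/* queue MEMX_TRAIN */
		ret = nouveau_memx_fini(&memx, true);	/* execute the script */
		if (ret)
			return ret;

		/* fetch up to 64 words of training data */
		return nouveau_memx_train_result(ppwr, results, ARRAY_SIZE(results));
	}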
index 32794a999106c3239c851ba59519e3dd3d61e6e9..26ccd8df193f8b5a5c34f5da068c7c9e1fafc61f 100644 (file)
@@ -101,6 +101,41 @@ nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
        return ret;
 }
 
+static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
+               struct nouveau_volt *volt)
+{
+       struct nvbios_volt_entry ivid;
+       struct nvbios_volt info;
+       u8  ver, hdr, cnt, len;
+       u16 data;
+       int i;
+
+       data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+       if (data && info.vidmask && info.base && info.step) {
+               for (i = 0; i < info.vidmask + 1; i++) {
+                       if (info.base >= info.min &&
+                               info.base <= info.max) {
+                               volt->vid[volt->vid_nr].uv = info.base;
+                               volt->vid[volt->vid_nr].vid = i;
+                               volt->vid_nr++;
+                       }
+                       info.base += info.step;
+               }
+               volt->vid_mask = info.vidmask;
+       } else if (data && info.vidmask) {
+               for (i = 0; i < cnt; i++) {
+                       data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+                                                         &ivid);
+                       if (data) {
+                               volt->vid[volt->vid_nr].uv = ivid.voltage;
+                               volt->vid[volt->vid_nr].vid = ivid.vid;
+                               volt->vid_nr++;
+                       }
+               }
+               volt->vid_mask = info.vidmask;
+       }
+}
+
 int
 _nouveau_volt_init(struct nouveau_object *object)
 {
@@ -136,10 +171,6 @@ nouveau_volt_create_(struct nouveau_object *parent,
 {
        struct nouveau_bios *bios = nouveau_bios(parent);
        struct nouveau_volt *volt;
-       struct nvbios_volt_entry ivid;
-       struct nvbios_volt info;
-       u8  ver, hdr, cnt, len;
-       u16 data;
        int ret, i;
 
        ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
@@ -152,31 +183,9 @@ nouveau_volt_create_(struct nouveau_object *parent,
        volt->set = nouveau_volt_set;
        volt->set_id = nouveau_volt_set_id;
 
-       data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
-       if (data && info.vidmask && info.base && info.step) {
-               for (i = 0; i < info.vidmask + 1; i++) {
-                       if (info.base >= info.min &&
-                           info.base <= info.max) {
-                               volt->vid[volt->vid_nr].uv = info.base;
-                               volt->vid[volt->vid_nr].vid = i;
-                               volt->vid_nr++;
-                       }
-                       info.base += info.step;
-               }
-               volt->vid_mask = info.vidmask;
-       } else
-       if (data && info.vidmask) {
-               for (i = 0; i < cnt; i++) {
-                       data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
-                                                     &ivid);
-                       if (data) {
-                               volt->vid[volt->vid_nr].uv = ivid.voltage;
-                               volt->vid[volt->vid_nr].vid = ivid.vid;
-                               volt->vid_nr++;
-                       }
-               }
-               volt->vid_mask = info.vidmask;
-       }
+       /* A device without a BIOS is expected to build its voltage table later */
+       if (bios)
+               nouveau_volt_parse_bios(bios, volt);
 
        if (volt->vid_nr) {
                for (i = 0; i < volt->vid_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
new file mode 100644 (file)
index 0000000..717368e
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
+#include <subdev/volt.h>
+
+struct cvb_coef {
+       int c0;
+       int c1;
+       int c2;
+       int c3;
+       int c4;
+       int c5;
+};
+
+struct gk20a_volt_priv {
+       struct nouveau_volt base;
+       struct regulator *vdd;
+};
+
+const struct cvb_coef gk20a_cvb_coef[] = {
+       /* MHz,        c0,     c1,   c2,    c3,     c4,   c5 */
+       /*  72 */ { 1209886, -36468,  515,   417, -13123,  203},
+       /* 108 */ { 1130804, -27659,  296,   298, -10834,  221},
+       /* 180 */ { 1162871, -27110,  247,   238, -10681,  268},
+       /* 252 */ { 1220458, -28654,  247,   179, -10376,  298},
+       /* 324 */ { 1280953, -30204,  247,   119,  -9766,  304},
+       /* 396 */ { 1344547, -31777,  247,   119,  -8545,  292},
+       /* 468 */ { 1420168, -34227,  269,    60,  -7172,  256},
+       /* 540 */ { 1490757, -35955,  274,    60,  -5188,  197},
+       /* 612 */ { 1599112, -42583,  398,     0,  -1831,  119},
+       /* 648 */ { 1366986, -16459, -274,     0,  -3204,   72},
+       /* 684 */ { 1391884, -17078, -274,   -60,  -1526,   30},
+       /* 708 */ { 1415522, -17497, -274,   -60,   -458,    0},
+       /* 756 */ { 1464061, -18331, -274,  -119,   1831,  -72},
+       /* 804 */ { 1524225, -20064, -254,  -119,   4272, -155},
+       /* 852 */ { 1608418, -21643, -269,     0,    763,  -48},
+};
+
+/**
+ * cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
+ */
+static inline int
+gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
+               const struct cvb_coef *coef)
+{
+       int mv;
+
+       mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
+       mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale) + coef->c0;
+       return mv;
+}
+
+/**
+ * cvb_t_mv =
+ * ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
+ * ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
+ */
+static inline int
+gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
+               const struct cvb_coef *coef)
+{
+       int cvb_mv, mv;
+
+       cvb_mv = gk20a_volt_get_cvb_voltage(speedo, s_scale, coef);
+
+       mv = DIV_ROUND_CLOSEST(coef->c3 * speedo, s_scale) + coef->c4 +
+               DIV_ROUND_CLOSEST(coef->c5 * temp, t_scale);
+       mv = DIV_ROUND_CLOSEST(mv * temp, t_scale) + cvb_mv;
+       return mv;
+}
+
+static int
+gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
+{
+       int mv;
+
+       mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
+       mv = DIV_ROUND_UP(mv, 1000);
+
+       return mv * 1000;
+}
+
+static int
+gk20a_volt_vid_get(struct nouveau_volt *volt)
+{
+       struct gk20a_volt_priv *priv = (void *)volt;
+       int i, uv;
+
+       uv = regulator_get_voltage(priv->vdd);
+
+       for (i = 0; i < volt->vid_nr; i++)
+               if (volt->vid[i].uv >= uv)
+                       return i;
+
+       return -EINVAL;
+}
+
+static int
+gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
+{
+       struct gk20a_volt_priv *priv = (void *)volt;
+
+       nv_debug(volt, "set voltage to %duV\n", volt->vid[vid].uv);
+       return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+}
+
+static int
+gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+       struct gk20a_volt_priv *priv = (void *)volt;
+       int prev_uv = regulator_get_voltage(priv->vdd);
+       int target_uv = volt->vid[id].uv;
+       int ret;
+
+       nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
+                       prev_uv, target_uv, condition);
+       if (!condition ||
+               (condition < 0 && target_uv < prev_uv) ||
+               (condition > 0 && target_uv > prev_uv)) {
+               ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+       } else {
+               ret = 0;
+       }
+
+       return ret;
+}
+
+static int
+gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct gk20a_volt_priv *priv;
+       struct nouveau_volt *volt;
+       struct nouveau_platform_device *plat;
+       int i, ret, uv;
+
+       ret = nouveau_volt_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       volt = &priv->base;
+
+       plat = nv_device_to_platform(nv_device(parent));
+
+       uv = regulator_get_voltage(plat->gpu->vdd);
+       nv_info(priv, "The default voltage is %duV\n", uv);
+
+       priv->vdd = plat->gpu->vdd;
+       priv->base.vid_get = gk20a_volt_vid_get;
+       priv->base.vid_set = gk20a_volt_vid_set;
+       priv->base.set_id = gk20a_volt_set_id;
+
+       volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+       nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
+       for (i = 0; i < volt->vid_nr; i++) {
+               volt->vid[i].vid = i;
+               volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+                                       plat->gpu_speedo);
+               nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
+                                       volt->vid[i].uv);
+       }
+
+       return 0;
+}
+
+struct nouveau_oclass
+gk20a_volt_oclass = {
+       .handle = NV_SUBDEV(VOLT, 0xea),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = gk20a_volt_ctor,
+               .dtor = _nouveau_volt_dtor,
+               .init = _nouveau_volt_init,
+               .fini = _nouveau_volt_fini,
+       },
+};
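
The CVB comments in the new file compress a two-stage fixed-point evaluation. As a concrete check (my arithmetic, with an arbitrary speedo value, not data from the commit), the sketch below replays gk20a_volt_calc_voltage() for the 72 MHz coefficient row in plain userspace C; the div_round_* helpers stand in for the kernel's DIV_ROUND_CLOSEST/DIV_ROUND_UP with a positive divisor.

	#include <stdio.h>

	/* userspace stand-ins for the kernel rounding helpers */
	static int div_round_closest(int x, int d)
	{
		return x >= 0 ? (x + d / 2) / d : (x - d / 2) / d;
	}

	static int div_round_up(int x, int d)
	{
		return (x + d - 1) / d;
	}

	int main(void)
	{
		/* 72 MHz row of gk20a_cvb_coef */
		int c0 = 1209886, c1 = -36468, c2 = 515;
		int c3 = 417, c4 = -13123, c5 = 203;
		int speedo = 1376;		/* arbitrary example value */
		int temp = -10, s_scale = 100, t_scale = 10;

		/* cvb_mv = (c2 * speedo / s_scale + c1) * speedo / s_scale + c0 */
		int mv = div_round_closest(c2 * speedo, s_scale);
		int cvb_mv = div_round_closest((mv + c1) * speedo, s_scale) + c0;

		/* temperature-dependent term, evaluated at T = -10 as in the driver */
		mv = div_round_closest(c3 * speedo, s_scale) + c4 +
		     div_round_closest(c5 * temp, t_scale);
		mv = div_round_closest(mv * temp, t_scale) + cvb_mv;

		/* round up to a whole millivolt and report microvolts */
		printf("%d uV\n", div_round_up(mv, 1000) * 1000);
		return 0;
	}

With these inputs the program prints 814000 uV, roughly 0.81 V at the lowest clock, comfortably under the 1200000 uV ceiling that gk20a_volt_vid_set() passes to the regulator.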
index fca6a1f9c20c5f9957f8f66a1a399a73d54f3873..38402ade68351f9ecab27eed823c0a6d131a75b5 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
@@ -613,7 +614,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        int ret;
 
-       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret == 0) {
                if (disp->image[nv_crtc->index])
                        nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1129,7 +1130,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
        ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
        if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
                if (!ret) {
                        ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
                        if (ret)
index 1e9056a8df945917750e9d7effb576e69a9351d9..9f2498571d0953801e30108e439a473b4afff71c 100644 (file)
@@ -126,7 +126,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                        return -ERANGE;
        }
 
-       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                return ret;
 
@@ -373,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w < src_w || crtc_h < src_h)
                return -ERANGE;
 
-       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                return ret;
 
index a24faa5e2a2ae67a5075cb2f9506b9f93fa66083..d39a150000680f0d5c77023670676d322d37bdb3 100644 (file)
@@ -308,7 +308,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
        ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
                              0, 0, &chan->ntfy);
        if (ret == 0)
-               ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+               ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
        if (ret)
                goto done;
 
index dae2c96deef86f882888899f64d952950587882d..7df6acc8bb3413ad847db56c5b234764dbd7124e 100644 (file)
@@ -1258,7 +1258,7 @@ olddcb_table(struct drm_device *dev)
                return NULL;
        }
 
-       if (dcb[0] >= 0x41) {
+       if (dcb[0] >= 0x42) {
                NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
                return NULL;
        } else
@@ -1481,18 +1481,22 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
                        entry->dpconf.link_bw = 540000;
                        break;
                }
-               switch ((conf & 0x0f000000) >> 24) {
-               case 0xf:
-                       entry->dpconf.link_nr = 4;
-                       break;
-               case 0x3:
-                       entry->dpconf.link_nr = 2;
-                       break;
-               default:
-                       entry->dpconf.link_nr = 1;
-                       break;
+               entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+               if (dcb->version < 0x41) {
+                       switch (entry->dpconf.link_nr) {
+                       case 0xf:
+                               entry->dpconf.link_nr = 4;
+                               break;
+                       case 0x3:
+                               entry->dpconf.link_nr = 2;
+                               break;
+                       default:
+                               entry->dpconf.link_nr = 1;
+                               break;
+                       }
                }
                link = entry->dpconf.sor.link;
+               entry->i2c_index += NV_I2C_AUX(0);
                break;
        case DCB_OUTPUT_TMDS:
                if (dcb->version >= 0x40) {
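
The DCB hunk above changes how the DisplayPort lane count is decoded: the raw field at conf[27:24] used to be a lane mask on all table versions, but from DCB 4.1 on it already holds a lane count. A standalone restatement of the rule (the function name is mine; the logic mirrors the hunk):

	/* Decode rule implemented by the parse_dcb20_entry() change above:
	 * pre-4.1 tables store a lane mask (0xf -> 4 lanes, 0x3 -> 2 lanes),
	 * 4.1+ tables store the lane count directly. */
	static int dcb_dp_link_nr(int dcb_version, unsigned int conf)
	{
		int link_nr = (conf & 0x0f000000) >> 24;

		if (dcb_version >= 0x41)
			return link_nr;

		switch (link_nr) {
		case 0xf: return 4;
		case 0x3: return 2;
		default:  return 1;
		}
	}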
index 3d474ac03f8847c7985c2c664a88bed1cfa189b0..21ec561edc999458c5a8d4f1a99be19e67070ca0 100644 (file)
@@ -214,6 +214,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
+       if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
+               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+
        nvbo->page_shift = 12;
        if (drm->client.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
@@ -291,8 +294,9 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 {
        struct ttm_placement *pl = &nvbo->placement;
-       uint32_t flags = TTM_PL_MASK_CACHING |
-               (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+       uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
+                                                TTM_PL_MASK_CACHING) |
+                        (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
@@ -306,42 +310,75 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 }
 
 int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
+       bool force = false, evict = false;
        int ret;
 
        ret = ttm_bo_reserve(bo, false, false, false, NULL);
        if (ret)
-               goto out;
+               return ret;
 
-       if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
-               NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
-                        1 << bo->mem.mem_type, memtype);
-               ret = -EINVAL;
-               goto out;
+       if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+           memtype == TTM_PL_FLAG_VRAM && contig) {
+               if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+                       if (bo->mem.mem_type == TTM_PL_VRAM) {
+                               struct nouveau_mem *mem = bo->mem.mm_node;
+                               if (!list_is_singular(&mem->regions))
+                                       evict = true;
+                       }
+                       nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+                       force = true;
+               }
        }
 
-       if (nvbo->pin_refcnt++)
+       if (nvbo->pin_refcnt) {
+               if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+                       NV_ERROR(drm, "bo %p pinned elsewhere: "
+                                     "0x%08x vs 0x%08x\n", bo,
+                                1 << bo->mem.mem_type, memtype);
+                       ret = -EBUSY;
+               }
+               nvbo->pin_refcnt++;
                goto out;
+       }
 
+       if (evict) {
+               nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+               ret = nouveau_bo_validate(nvbo, false, false);
+               if (ret)
+                       goto out;
+       }
+
+       nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, memtype, 0);
 
+       /* drop pin_refcnt temporarily, so we don't trip the assertion
+        * in nouveau_bo_move() that makes sure we're not trying to
+        * move a pinned buffer
+        */
+       nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
-       if (ret == 0) {
-               switch (bo->mem.mem_type) {
-               case TTM_PL_VRAM:
-                       drm->gem.vram_available -= bo->mem.size;
-                       break;
-               case TTM_PL_TT:
-                       drm->gem.gart_available -= bo->mem.size;
-                       break;
-               default:
-                       break;
-               }
+       if (ret)
+               goto out;
+       nvbo->pin_refcnt++;
+
+       switch (bo->mem.mem_type) {
+       case TTM_PL_VRAM:
+               drm->gem.vram_available -= bo->mem.size;
+               break;
+       case TTM_PL_TT:
+               drm->gem.gart_available -= bo->mem.size;
+               break;
+       default:
+               break;
        }
+
 out:
+       if (force && ret)
+               nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
        ttm_bo_unreserve(bo);
        return ret;
 }
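A caller-side sketch of the reworked pin interface (hypothetical usage, mirroring the call sites updated throughout this series): scanout buffers must request contiguous VRAM, while everything else passes false and may keep its NONCONTIG tiling.

	/* framebuffer/cursor: the display engine requires contiguous VRAM */
	ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
	/* ordinary notifier object in GART: contiguity irrelevant */
	ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);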
@@ -392,7 +429,14 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
        if (ret)
                return ret;
 
-       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+       /*
+        * TTM buffers allocated using the DMA API already have a mapping;
+        * let's use it instead.
+        */
+       if (!nvbo->force_coherent)
+               ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+                                 &nvbo->kmap);
+
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
 }
@@ -400,10 +444,57 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 void
 nouveau_bo_unmap(struct nouveau_bo *nvbo)
 {
-       if (nvbo)
+       if (!nvbo)
+               return;
+
+       /*
+        * TTM buffers allocated using the DMA API already had a coherent
+        * mapping, which we used; there is no need to unmap it here.
+        */
+       if (!nvbo->force_coherent)
                ttm_bo_kunmap(&nvbo->kmap);
 }
 
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+       struct nouveau_device *device = nvkm_device(&drm->device);
+       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       int i;
+
+       if (!ttm_dma)
+               return;
+
+       /* Don't waste time looping if the object is coherent */
+       if (nvbo->force_coherent)
+               return;
+
+       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+               dma_sync_single_for_device(nv_device_base(device),
+                       ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+       struct nouveau_device *device = nvkm_device(&drm->device);
+       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       int i;
+
+       if (!ttm_dma)
+               return;
+
+       /* Don't waste time looping if the object is coherent */
+       if (nvbo->force_coherent)
+               return;
+
+       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+               dma_sync_single_for_cpu(nv_device_base(device),
+                       ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
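Assuming standard streaming DMA semantics, the two helpers above bracket CPU access to a non-coherent object; a minimal sketch of the intended pattern:

	nouveau_bo_sync_for_cpu(nvbo);        /* make GPU writes visible to the CPU */
	val = nouveau_bo_rd32(nvbo, 0);       /* CPU reads/writes through the mapping */
	nouveau_bo_wr32(nvbo, 0, val + 1);
	nouveau_bo_sync_for_device(nvbo);     /* flush CPU writes before GPU access */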
@@ -415,15 +506,41 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
        if (ret)
                return ret;
 
+       nouveau_bo_sync_for_device(nvbo);
+
        return 0;
 }
 
+static inline void *
+_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
+{
+       struct ttm_dma_tt *dma_tt;
+       u8 *m = mem;
+
+       index *= sz;
+
+       if (m) {
+               /* kmap'd address, return the corresponding offset */
+               m += index;
+       } else {
+               /* DMA-API mapping, lookup the right address */
+               dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
+               m = dma_tt->cpu_address[index / PAGE_SIZE];
+               m += index % PAGE_SIZE;
+       }
+
+       return m;
+}
+#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
+
 u16
 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
 {
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-       mem = &mem[index];
+
+       mem = nouveau_bo_mem_index(nvbo, index, mem);
+
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
@@ -435,7 +552,9 @@ nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 {
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-       mem = &mem[index];
+
+       mem = nouveau_bo_mem_index(nvbo, index, mem);
+
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
@@ -447,7 +566,9 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
 {
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-       mem = &mem[index];
+
+       mem = nouveau_bo_mem_index(nvbo, index, mem);
+
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
@@ -459,7 +580,9 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 {
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-       mem = &mem[index];
+
+       mem = nouveau_bo_mem_index(nvbo, index, mem);
+
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
@@ -1184,6 +1307,9 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;
 
+       if (nvbo->pin_refcnt)
+               NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
+
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
@@ -1376,6 +1502,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = nv_device_base(device);
 
+       /*
+        * Objects matching this condition have been marked as force_coherent,
+        * so use the DMA API for them.
+        */
+       if (!nv_device_is_cpu_coherent(device) &&
+           ttm->caching_state == tt_uncached)
+               return ttm_dma_populate(ttm_dma, dev->dev);
+
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
                return ttm_agp_tt_populate(ttm);
@@ -1433,6 +1567,16 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = nv_device_base(device);
 
+       /*
+        * Objects matching this condition have been marked as force_coherent,
+        * so use the DMA API for them.
+        */
+       if (!nv_device_is_cpu_coherent(device) &&
+           ttm->caching_state == tt_uncached) {
+               ttm_dma_unpopulate(ttm_dma, dev->dev);
+               return;
+       }
+
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
                ttm_agp_tt_unpopulate(ttm);
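The offset arithmetic in _nouveau_bo_mem_index(), worked through for a 32-bit access with PAGE_SIZE = 4096 (illustrative numbers):

	/* nouveau_bo_rd32(nvbo, 1030) on a DMA-API mapped object (mem == NULL): */
	/*   byte index = 1030 * sizeof(u32) = 4120                              */
	/*   page       = 4120 / 4096 = 1    -> dma_tt->cpu_address[1]           */
	/*   offset     = 4120 % 4096 = 24   -> cpu_address[1] + 24              */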
index 22d2c764d80bd17c9fe755abc56b225722eee513..072222efeeb765467605f844d22e340180cda048 100644 (file)
@@ -13,6 +13,7 @@ struct nouveau_bo {
        u32 valid_domains;
        struct ttm_place placements[3];
        struct ttm_place busy_placements[3];
+       bool force_coherent;
        struct ttm_bo_kmap_obj kmap;
        struct list_head head;
 
@@ -72,7 +73,7 @@ int  nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
                    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
                    struct reservation_object *robj,
                    struct nouveau_bo **);
-int  nouveau_bo_pin(struct nouveau_bo *, u32 flags);
+int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
 int  nouveau_bo_unpin(struct nouveau_bo *);
 int  nouveau_bo_map(struct nouveau_bo *);
 void nouveau_bo_unmap(struct nouveau_bo *);
@@ -84,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
                         bool no_wait_gpu);
+void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
+void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
 
 struct nouveau_vma *
 nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
index fd3dbd59d73e18113a6ef357e658570c113155be..aff9099aae6cca23b6e51247ee07068f23b691c1 100644 (file)
@@ -102,14 +102,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
        chan->drm = drm;
 
        /* allocate memory for dma push buffer */
-       target = TTM_PL_FLAG_TT;
+       target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
        if (nouveau_vram_pushbuf)
                target = TTM_PL_FLAG_VRAM;
 
        ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
                            &chan->push.buffer);
        if (ret == 0) {
-               ret = nouveau_bo_pin(chan->push.buffer, target);
+               ret = nouveau_bo_pin(chan->push.buffer, target, false);
                if (ret == 0)
                        ret = nouveau_bo_map(chan->push.buffer);
        }
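On a non-coherent platform the uncached GART placement above routes the push buffer through the new coherent path; the flag flow, condensed from the nouveau_bo.c hunks earlier in this diff:

	/* nouveau_bo_new():         !cpu_coherent && UNCACHED -> force_coherent  */
	/* nouveau_ttm_tt_populate(): tt_uncached -> ttm_dma_populate() (DMA API) */
	/* nouveau_bo_map():         reuse the DMA API's coherent CPU mapping     */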
@@ -285,7 +285,6 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        struct nouveau_software_chan *swch;
        struct nv_dma_v0 args = {};
        int ret, i;
-       bool save;
 
        nvif_object_map(chan->object);
 
@@ -387,11 +386,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        }
 
        /* initialise synchronisation */
-       save = cli->base.super;
-       cli->base.super = true; /* hack until fencenv50 fixed */
-       ret = nouveau_fence(chan->drm)->context_new(chan);
-       cli->base.super = save;
-       return ret;
+       return nouveau_fence(chan->drm)->context_new(chan);
 }
 
 int
index a88e6927f5713e29a0db95a769dd16401f9f920f..5d93902a91ab038cfe040197f767c0de05d7931f 100644 (file)
@@ -479,6 +479,7 @@ nouveau_display_create(struct drm_device *dev)
 
        if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
                static const u16 oclass[] = {
+                       GM204_DISP,
                        GM107_DISP,
                        GK110_DISP,
                        GK104_DISP,
@@ -568,9 +569,10 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-               nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+               if (nv_crtc->cursor.nvbo) {
+                       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+                       nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+               }
        }
 
        return 0;
@@ -591,15 +593,17 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
                if (!nouveau_fb || !nouveau_fb->nvbo)
                        continue;
 
-               ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
                if (ret)
                        NV_ERROR(drm, "Could not pin framebuffer\n");
        }
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+               if (!nv_crtc->cursor.nvbo)
+                       continue;
 
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
                if (!ret)
                        ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
                if (ret)
@@ -630,9 +634,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-               u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
-               nv_crtc->cursor.set_offset(nv_crtc, offset);
+               if (!nv_crtc->cursor.nvbo)
+                       continue;
+               nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
                nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
                                                 nv_crtc->cursor_saved_y);
        }
@@ -710,7 +715,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                return -ENOMEM;
 
        if (new_bo != old_bo) {
-               ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
                if (ret)
                        goto fail_free;
        }
@@ -871,6 +876,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        if (ret)
                return ret;
 
+       bo->gem.dumb = true;
        ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->gem);
        return ret;
@@ -886,6 +892,14 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = nouveau_gem_object(gem);
+
+               /*
+                * We don't allow dumb mmaps on objects created using another
+                * interface.
+                */
+               WARN_ONCE(!(gem->dumb || gem->import_attach),
+                         "Illegal dumb map of accelerated buffer.\n");
+
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
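Taken together with the nouveau_gem.c hunk further down, the dumb-buffer checks form a simple one-way policy (condensed sketch):

	/* dumb_create:      bo->gem.dumb = true        (CPU/scanout use only) */
	/* dumb_map_offset:  WARN unless gem->dumb || gem->import_attach       */
	/* pushbuf validate: WARN if nvbo->gem.dumb     (no GPU rendering use) */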
index 57238076049f6fe16717ba6626bca954b1910aeb..65910e3aed0c86e1240273f274f42ea947373d58 100644 (file)
@@ -613,27 +613,6 @@ fail_display:
        return ret;
 }
 
-int nouveau_pmops_suspend(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       int ret;
-
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-           drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
-               return 0;
-
-       ret = nouveau_do_suspend(drm_dev, false);
-       if (ret)
-               return ret;
-
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_ignore_hotplug(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
-       return 0;
-}
-
 static int
 nouveau_do_resume(struct drm_device *dev, bool runtime)
 {
@@ -668,7 +647,29 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
        return 0;
 }
 
-int nouveau_pmops_resume(struct device *dev)
+int
+nouveau_pmops_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       int ret;
+
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+           drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+               return 0;
+
+       ret = nouveau_do_suspend(drm_dev, false);
+       if (ret)
+               return ret;
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+       return 0;
+}
+
+int
+nouveau_pmops_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
@@ -688,20 +689,122 @@ int nouveau_pmops_resume(struct device *dev)
        return nouveau_do_resume(drm_dev, false);
 }
 
-static int nouveau_pmops_freeze(struct device *dev)
+static int
+nouveau_pmops_freeze(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        return nouveau_do_suspend(drm_dev, false);
 }
 
-static int nouveau_pmops_thaw(struct device *dev)
+static int
+nouveau_pmops_thaw(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        return nouveau_do_resume(drm_dev, false);
 }
 
+static int
+nouveau_pmops_runtime_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       int ret;
+
+       if (nouveau_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
+
+       /* are we optimus enabled? */
+       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
+
+       nv_debug_level(SILENT);
+       drm_kms_helper_poll_disable(drm_dev);
+       vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+       nouveau_switcheroo_optimus_dsm();
+       ret = nouveau_do_suspend(drm_dev, true);
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_ignore_hotplug(pdev);
+       pci_set_power_state(pdev, PCI_D3cold);
+       drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+       return ret;
+}
+
+static int
+nouveau_pmops_runtime_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct nvif_device *device = &nouveau_drm(drm_dev)->device;
+       int ret;
+
+       if (nouveau_runtime_pm == 0)
+               return -EINVAL;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+       pci_set_master(pdev);
+
+       ret = nouveau_do_resume(drm_dev, true);
+       drm_kms_helper_poll_enable(drm_dev);
+       /* do magic */
+       nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+       vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+       drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+       nv_debug_level(NORMAL);
+       return ret;
+}
+
+static int
+nouveau_pmops_runtime_idle(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct nouveau_drm *drm = nouveau_drm(drm_dev);
+       struct drm_crtc *crtc;
+
+       if (nouveau_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
+
+       /* are we optimus enabled? */
+       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
+
+       /* if we have an HDMI audio device, make sure it has a driver loaded */
+       if (drm->hdmi_device) {
+               if (!drm->hdmi_device->driver) {
+                       DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
+                       pm_runtime_mark_last_busy(dev);
+                       return -EBUSY;
+               }
+       }
+
+       list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+               if (crtc->enabled) {
+                       DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+                       return -EBUSY;
+               }
+       }
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_autosuspend(dev);
+       /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+       return 1;
+}
 
 static int
 nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
@@ -908,103 +1011,6 @@ nouveau_drm_pci_table[] = {
        {}
 };
 
-static int nouveau_pmops_runtime_suspend(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       int ret;
-
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       nv_debug_level(SILENT);
-       drm_kms_helper_poll_disable(drm_dev);
-       vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
-       nouveau_switcheroo_optimus_dsm();
-       ret = nouveau_do_suspend(drm_dev, true);
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3cold);
-       drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
-       return ret;
-}
-
-static int nouveau_pmops_runtime_resume(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct nvif_device *device = &nouveau_drm(drm_dev)->device;
-       int ret;
-
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       ret = pci_enable_device(pdev);
-       if (ret)
-               return ret;
-       pci_set_master(pdev);
-
-       ret = nouveau_do_resume(drm_dev, true);
-       drm_kms_helper_poll_enable(drm_dev);
-       /* do magic */
-       nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
-       vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
-       drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
-       nv_debug_level(NORMAL);
-       return ret;
-}
-
-static int nouveau_pmops_runtime_idle(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(drm_dev);
-       struct drm_crtc *crtc;
-
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* if we have a hdmi audio device - make sure it has a driver loaded */
-       if (drm->hdmi_device) {
-               if (!drm->hdmi_device->driver) {
-                       DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
-                       pm_runtime_mark_last_busy(dev);
-                       return -EBUSY;
-               }
-       }
-
-       list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
-               if (crtc->enabled) {
-                       DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-                       return -EBUSY;
-               }
-       }
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_autosuspend(dev);
-       /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
-       return 1;
-}
-
 static void nouveau_display_options(void)
 {
        DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
index 593ef8a2a069e16b4fdf96e9c95b67ca8ebd1b95..3ed12a8cfc914520fc097ac98e5e16e63ed0d532 100644 (file)
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret) {
                NV_ERROR(drm, "failed to pin fb: %d\n", ret);
                goto out_unref;
@@ -498,6 +498,23 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
        console_unlock();
 }
 
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       if (drm->fbcon) {
+               if (state == FBINFO_STATE_RUNNING) {
+                       schedule_work(&drm->fbcon->work);
+                       return;
+               }
+               flush_work(&drm->fbcon->work);
+               console_lock();
+               fb_set_suspend(drm->fbcon->helper.fbdev, state);
+               nouveau_fbcon_accel_save_disable(dev);
+               console_unlock();
+       }
+}
+
 int
 nouveau_fbcon_init(struct drm_device *dev)
 {
@@ -557,20 +574,3 @@ nouveau_fbcon_fini(struct drm_device *dev)
        kfree(drm->fbcon);
        drm->fbcon = NULL;
 }
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               if (state == FBINFO_STATE_RUNNING) {
-                       schedule_work(&drm->fbcon->work);
-                       return;
-               }
-               flush_work(&drm->fbcon->work);
-               console_lock();
-               fb_set_suspend(drm->fbcon->helper.fbdev, state);
-               nouveau_fbcon_accel_save_disable(dev);
-               console_unlock();
-       }
-}
index 515cd9aebb9982d1c0c0c06f1deaf4d0c7bf3e5c..f32a434724e307f5da958de0bdf26bb09d115f4b 100644 (file)
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
        return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }
 
-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+       int drop = 0;
+
        fence_signal_locked(&fence->base);
        list_del(&fence->head);
+       rcu_assign_pointer(fence->channel, NULL);
 
        if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
                if (!--fctx->notify_ref)
-                       nvif_notify_put(&fctx->notify);
+                       drop = 1;
        }
 
        fence_put(&fence->base);
+       return drop;
 }
 
 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence;
 
-       nvif_notify_fini(&fctx->notify);
-
        spin_lock_irq(&fctx->lock);
        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
-               nouveau_fence_signal(fence);
-               fence->channel = NULL;
+               if (nouveau_fence_signal(fence))
+                       nvif_notify_put(&fctx->notify);
        }
        spin_unlock_irq(&fctx->lock);
+
+       nvif_notify_fini(&fctx->notify);
+       fctx->dead = 1;
+
+       /*
+        * Ensure that all accesses to fence->channel complete before freeing
+        * the channel.
+        */
+       synchronize_rcu();
 }
 
 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
        kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }
 
-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence;
-
+       int drop = 0;
        u32 seq = fctx->read(chan);
 
        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
                if ((int)(seq - fence->base.seqno) < 0)
-                       return;
+                       break;
 
-               nouveau_fence_signal(fence);
+               drop |= nouveau_fence_signal(fence);
        }
+
+       return drop;
 }
 
 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
        struct nouveau_fence_chan *fctx =
                container_of(notify, typeof(*fctx), notify);
        unsigned long flags;
+       int ret = NVIF_NOTIFY_KEEP;
 
        spin_lock_irqsave(&fctx->lock, flags);
        if (!list_empty(&fctx->pending)) {
                struct nouveau_fence *fence;
+               struct nouveau_channel *chan;
 
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
-               nouveau_fence_update(fence->channel, fctx);
+               chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+               if (nouveau_fence_update(chan, fctx))
+                       ret = NVIF_NOTIFY_DROP;
        }
        spin_unlock_irqrestore(&fctx->lock, flags);
 
-       /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-       return NVIF_NOTIFY_KEEP;
+       return ret;
 }
 
 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
        if (!ret) {
                fence_get(&fence->base);
                spin_lock_irq(&fctx->lock);
-               nouveau_fence_update(chan, fctx);
+
+               if (nouveau_fence_update(chan, fctx))
+                       nvif_notify_put(&fctx->notify);
+
                list_add_tail(&fence->head, &fctx->pending);
                spin_unlock_irq(&fctx->lock);
        }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
        if (fence->base.ops == &nouveau_fence_ops_legacy ||
            fence->base.ops == &nouveau_fence_ops_uevent) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+               struct nouveau_channel *chan;
                unsigned long flags;
 
                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                        return true;
 
                spin_lock_irqsave(&fctx->lock, flags);
-               nouveau_fence_update(fence->channel, fctx);
+               chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+               if (chan && nouveau_fence_update(chan, fctx))
+                       nvif_notify_put(&fctx->notify);
                spin_unlock_irqrestore(&fctx->lock, flags);
        }
        return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                struct nouveau_channel *prev = NULL;
+               bool must_wait = true;
 
                f = nouveau_local_fence(fence, chan->drm);
-               if (f)
-                       prev = f->channel;
+               if (f) {
+                       rcu_read_lock();
+                       prev = rcu_dereference(f->channel);
+                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                               must_wait = false;
+                       rcu_read_unlock();
+               }
 
-               if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+               if (must_wait)
                        ret = fence_wait(fence, intr);
 
                return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
        for (i = 0; i < fobj->shared_count && !ret; ++i) {
                struct nouveau_channel *prev = NULL;
+               bool must_wait = true;
 
                fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(resv));
 
                f = nouveau_local_fence(fence, chan->drm);
-               if (f)
-                       prev = f->channel;
+               if (f) {
+                       rcu_read_lock();
+                       prev = rcu_dereference(f->channel);
+                       if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                               must_wait = false;
+                       rcu_read_unlock();
+               }
 
-               if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+               if (must_wait)
                        ret = fence_wait(fence, intr);
-
-               if (ret)
-                       break;
        }
 
        return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-       return fence->channel ? fctx->name : "dead channel";
+       return !fctx->dead ? fctx->name : "dead channel";
 }
 
 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-       struct nouveau_channel *chan = fence->channel;
+       struct nouveau_channel *chan;
+       bool ret = false;
+
+       rcu_read_lock();
+       chan = rcu_dereference(fence->channel);
+       if (chan)
+               ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+       rcu_read_unlock();
 
-       return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+       return ret;
 }
 
 static bool nouveau_fence_no_signaling(struct fence *f)
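The RCU protocol for the now-__rcu fence->channel pointer, condensed from the hunks above (a sketch, not patch content):

	/* writer, fctx->lock held (signal path): */
	rcu_assign_pointer(fence->channel, NULL);
	/* teardown: wait out readers before the channel is freed */
	synchronize_rcu();
	/* reader: */
	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan) {
		/* channel is guaranteed live within this read section */
	}
	rcu_read_unlock();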
index 943b0b17b1fc760296eccea9b409f03696a644fc..96e461c6f68fac370602777d8e5bd363100fa0a5 100644 (file)
@@ -14,7 +14,7 @@ struct nouveau_fence {
 
        bool sysmem;
 
-       struct nouveau_channel *channel;
+       struct nouveau_channel __rcu *channel;
        unsigned long timeout;
 };
 
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
        char name[32];
 
        struct nvif_notify notify;
-       int notify_ref;
+       int notify_ref, dead;
 };
 
 struct nouveau_fence_priv {
index 36951ee4b1571807d9a1f85c796f00b232e043d6..28d51a22a4bf18b5729f8f4e9cd159c830cbbfca 100644 (file)
@@ -444,6 +444,9 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
+               WARN_ONCE(nvbo->gem.dumb,
+                         "GPU use of dumb buffer is illegal.\n");
+
                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
@@ -867,6 +870,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                else
                        ret = lret;
        }
+       nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_unreference_unlocked(gem);
 
        return ret;
@@ -876,6 +880,17 @@ int
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
+       struct drm_nouveau_gem_cpu_fini *req = data;
+       struct drm_gem_object *gem;
+       struct nouveau_bo *nvbo;
+
+       gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+       if (!gem)
+               return -ENOENT;
+       nvbo = nouveau_gem_object(gem);
+
+       nouveau_bo_sync_for_device(nvbo);
+       drm_gem_object_unreference_unlocked(gem);
        return 0;
 }
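The prep/fini ioctls above now bracket CPU access the same way the bo sync helpers do (assumed flow, condensed from the two hunks):

	/* CPU_PREP: wait for GPU fences, then nouveau_bo_sync_for_cpu()    */
	/* ... userspace reads/writes the buffer through its mapping ...    */
	/* CPU_FINI: nouveau_bo_sync_for_device() before further GPU use    */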
 
index 246a824c16ca9a88acc05c5f464a0d7a5f387a23..b307bbedd4c43548387557ca0cd4a44a5654ae4e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/of.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
+#include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
 
 #include "nouveau_drm.h"
@@ -128,6 +129,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
        }
 
        device->gpu = gpu;
+       device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
 
        err = drm_dev_register(drm, 0);
        if (err < 0)
index 91f66504900ef21bbdd2de8488649c58dccd5cfa..58c28b5653d5a8ade0718edd509646888bdf9a6d 100644 (file)
@@ -41,6 +41,8 @@ struct nouveau_platform_device {
        struct nouveau_device device;
 
        struct nouveau_platform_gpu *gpu;
+
+       int gpu_speedo;
 };
 
 #define nv_device_to_platform(d)                                               \
index 228226ab27fc1b55883d9d7bcf6066f5c3852bc5..dd32ad6db53d2525bfbdde846a87f8b369269cc8 100644 (file)
@@ -93,7 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
        int ret;
 
        /* pin buffer into GTT */
-       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
        if (ret)
                return -EINVAL;
 
index 40b461c7d5c565c0471c5f3b80f09221a7d42a8b..57860cfa1de5c2e2ebb2947773d8e0ff12f613a0 100644 (file)
@@ -131,7 +131,7 @@ nv17_fence_create(struct nouveau_drm *drm)
        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &priv->bo);
        if (!ret) {
-               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
                if (!ret) {
                        ret = nouveau_bo_map(priv->bo);
                        if (ret)
index ae873d1a8d463f7cd55da92b550eec1c44d31e25..490b90866baf4cae9feeb4880d33d03e47528bdc 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_dp_helper.h>
 
 #include <nvif/class.h>
@@ -65,15 +66,29 @@ static int
 nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
                 void *data, u32 size, struct nv50_chan *chan)
 {
+       const u32 handle = (oclass[0] << 16) | head;
+       u32 sclass[8];
+       int ret, i;
+
+       ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
+       WARN_ON(ret > ARRAY_SIZE(sclass));
+       if (ret < 0)
+               return ret;
+
        while (oclass[0]) {
-               int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
-                                          oclass[0], data, size,
-                                         &chan->user);
-               if (oclass++, ret == 0) {
-                       nvif_object_map(&chan->user);
-                       return ret;
+               for (i = 0; i < ARRAY_SIZE(sclass); i++) {
+                       if (sclass[i] == oclass[0]) {
+                               ret = nvif_object_init(disp, NULL, handle,
+                                                      oclass[0], data, size,
+                                                      &chan->user);
+                               if (ret == 0)
+                                       nvif_object_map(&chan->user);
+                               return ret;
+                       }
                }
+               oclass++;
        }
+
        return -ENOSYS;
 }
 
@@ -110,6 +125,7 @@ nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
 
 struct nv50_curs {
        struct nv50_pioc base;
+       struct nouveau_bo *image;
 };
 
 static int
@@ -265,6 +281,7 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
                .pushbuf = 0xb0007d00,
        };
        static const u32 oclass[] = {
+               GM204_DISP_CORE_CHANNEL_DMA,
                GM107_DISP_CORE_CHANNEL_DMA,
                GK110_DISP_CORE_CHANNEL_DMA,
                GK104_DISP_CORE_CHANNEL_DMA,
@@ -424,8 +441,21 @@ evo_kick(u32 *push, void *evoc)
        mutex_unlock(&dmac->lock);
 }
 
+#if 1
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
 #define evo_data(p,d)   *((p)++) = (d)
+#else
+#define evo_mthd(p,m,s) do {                                                   \
+       const u32 _m = (m), _s = (s);                                          \
+       printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__);                     \
+       *((p)++) = ((_s << 18) | _m);                                          \
+} while(0)
+#define evo_data(p,d) do {                                                     \
+       const u32 _d = (d);                                                    \
+       printk(KERN_ERR "\t%08x\n", _d);                                       \
+       *((p)++) = _d;                                                         \
+} while(0)
+#endif
 
 static bool
 evo_sync_wait(void *data)
@@ -790,6 +820,22 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
        return 0;
 }
 
+static int
+nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push;
+
+       push = evo_wait(mast, 8);
+       if (!push)
+               return -ENOMEM;
+
+       evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
+       evo_data(push, usec);
+       evo_kick(push, mast);
+       return 0;
+}
+
 static int
 nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 {
@@ -871,23 +917,24 @@ static void
 nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
 {
        struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
        u32 *push = evo_wait(mast, 16);
        if (push) {
                if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
                        evo_data(push, 0x85000000);
-                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_data(push, curs->image->bo.offset >> 8);
                } else
                if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
                        evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
                        evo_data(push, 0x85000000);
-                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_data(push, curs->image->bo.offset >> 8);
                        evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
                        evo_data(push, mast->base.vram.handle);
                } else {
                        evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
                        evo_data(push, 0x85000000);
-                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_data(push, curs->image->bo.offset >> 8);
                        evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
                        evo_data(push, mast->base.vram.handle);
                }
@@ -924,8 +971,9 @@ static void
 nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
 
-       if (show)
+       if (show && curs->image)
                nv50_crtc_cursor_show(nv_crtc);
        else
                nv50_crtc_cursor_hide(nv_crtc);
@@ -1025,7 +1073,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
                evo_kick(push, mast);
        }
 
-       nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+       nv50_crtc_cursor_show_hide(nv_crtc, true, true);
        nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
 }
 
@@ -1044,7 +1092,7 @@ nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
        struct nv50_head *head = nv50_head(crtc);
        int ret;
 
-       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
        if (ret == 0) {
                if (head->image)
                        nouveau_bo_unpin(head->image);
@@ -1104,14 +1152,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
                        evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
                        evo_data(push, 0x00800000 | mode->clock);
                        evo_data(push, (ilace == 2) ? 2 : 0);
-                       evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8);
+                       evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
                        evo_data(push, 0x00000000);
                        evo_data(push, (vactive << 16) | hactive);
                        evo_data(push, ( vsynce << 16) | hsynce);
                        evo_data(push, (vblanke << 16) | hblanke);
                        evo_data(push, (vblanks << 16) | hblanks);
                        evo_data(push, (vblan2e << 16) | vblan2s);
-                       evo_data(push, vblankus);
+                       evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
                        evo_data(push, 0x00000311);
@@ -1141,6 +1189,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
        nv_connector = nouveau_crtc_connector_get(nv_crtc);
        nv50_crtc_set_dither(nv_crtc, false);
        nv50_crtc_set_scale(nv_crtc, false);
+
+       /* G94 only accepts this after setting scale */
+       if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
+               nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
+
        nv50_crtc_set_color_vibrance(nv_crtc, false);
        nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
        return 0;
@@ -1220,13 +1273,13 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                     uint32_t handle, uint32_t width, uint32_t height)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_curs *curs = nv50_curs(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *gem;
-       struct nouveau_bo *nvbo;
-       bool visible = (handle != 0);
-       int i, ret = 0;
+       struct drm_gem_object *gem = NULL;
+       struct nouveau_bo *nvbo = NULL;
+       int ret = 0;
 
-       if (visible) {
+       if (handle) {
                if (width != 64 || height != 64)
                        return -EINVAL;
 
@@ -1235,23 +1288,17 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                        return -ENOENT;
                nvbo = nouveau_gem_object(gem);
 
-               ret = nouveau_bo_map(nvbo);
-               if (ret == 0) {
-                       for (i = 0; i < 64 * 64; i++) {
-                               u32 v = nouveau_bo_rd32(nvbo, i);
-                               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
-                       }
-                       nouveau_bo_unmap(nvbo);
-               }
-
-               drm_gem_object_unreference_unlocked(gem);
+               ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
        }
 
-       if (visible != nv_crtc->cursor.visible) {
-               nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
-               nv_crtc->cursor.visible = visible;
+       if (ret == 0) {
+               if (curs->image)
+                       nouveau_bo_unpin(curs->image);
+               nouveau_bo_ref(nvbo, &curs->image);
        }
+       drm_gem_object_unreference_unlocked(gem);
 
+       nv50_crtc_cursor_show_hide(nv_crtc, true, true);
        return ret;
 }
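The cursor bo lifecycle after this rework, condensed (the old path copied the image word-by-word into a private bo; now the user's bo is pinned and scanned out directly):

	/* cursor_set(handle): pin new bo contiguously in VRAM, then unpin   */
	/*                     and drop the previous curs->image             */
	/* cursor_set(0):      nvbo == NULL; previous image unpinned/dropped */
	/* crtc_destroy():     unpin and drop whatever image remains         */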
 
@@ -1306,10 +1353,10 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
                nouveau_bo_unpin(head->image);
        nouveau_bo_ref(NULL, &head->image);
 
-       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-       if (nv_crtc->cursor.nvbo)
-               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+       /*XXX: ditto */
+       if (head->curs.image)
+               nouveau_bo_unpin(head->curs.image);
+       nouveau_bo_ref(NULL, &head->curs.image);
 
        nouveau_bo_unmap(nv_crtc->lut.nvbo);
        if (nv_crtc->lut.nvbo)
@@ -1341,16 +1388,6 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
        .page_flip = nouveau_crtc_page_flip,
 };
 
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
 static int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
@@ -1369,8 +1406,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
        head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
        head->base.color_vibrance = 50;
        head->base.vibrant_hue = 0;
-       head->base.cursor.set_offset = nv50_cursor_set_offset;
-       head->base.cursor.set_pos = nv50_cursor_set_pos;
        for (i = 0; i < 256; i++) {
                head->base.lut.r[i] = i << 8;
                head->base.lut.g[i] = i << 8;
@@ -1385,7 +1420,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
        ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
        if (!ret) {
-               ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
                if (!ret) {
                        ret = nouveau_bo_map(head->base.lut.nvbo);
                        if (ret)
@@ -1402,22 +1437,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
 
        /* allocate cursor resources */
        ret = nv50_curs_create(disp->disp, index, &head->curs);
-       if (ret)
-               goto out;
-
-       ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret) {
-                       ret = nouveau_bo_map(head->base.cursor.nvbo);
-                       if (ret)
-                               nouveau_bo_unpin(head->base.lut.nvbo);
-               }
-               if (ret)
-                       nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
-       }
-
        if (ret)
                goto out;
 
@@ -1680,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
        drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
        memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
 
-       nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+       nvif_mthd(disp->disp, 0, &args,
+                 sizeof(args.base) + drm_eld_size(args.data));
 }
 
 static void
@@ -2352,11 +2372,6 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
        u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
        u8 tile = nvbo->tile_mode;
 
-       if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-               NV_ERROR(drm, "framebuffer requires contiguous bo\n");
-               return -EINVAL;
-       }
-
        if (drm->device.info.chipset >= 0xc0)
                tile >>= 4; /* yep.. */
 
@@ -2470,7 +2485,7 @@ nv50_display_create(struct drm_device *dev)
        ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &disp->sync);
        if (!ret) {
-               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
                if (!ret) {
                        ret = nouveau_bo_map(disp->sync);
                        if (ret)
index 22d242b37962cc14990174bc97f6876a30e31781..a82d9ea7c6fdeaf2a1fa96863b26a9ccc2b12e00 100644 (file)
@@ -102,7 +102,7 @@ nv50_fence_create(struct nouveau_drm *drm)
        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &priv->bo);
        if (!ret) {
-               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
                if (!ret) {
                        ret = nouveau_bo_map(priv->bo);
                        if (ret)
index d6c6c87c3f07e2e929e49cf8ca7d41145f8c164b..cb5b88938d451c7f8d70be4a2816d11fe3f25d35 100644 (file)
@@ -234,7 +234,7 @@ nv84_fence_create(struct nouveau_drm *drm)
        ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
        if (ret == 0) {
-               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+               ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
                if (ret == 0) {
                        ret = nouveau_bo_map(priv->bo);
                        if (ret)
@@ -246,10 +246,10 @@ nv84_fence_create(struct nouveau_drm *drm)
 
        if (ret == 0)
                ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-                                    TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
-                                    &priv->bo_gart);
+                                    TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
+                                    0, NULL, NULL, &priv->bo_gart);
        if (ret == 0) {
-               ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+               ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
                if (ret == 0) {
                        ret = nouveau_bo_map(priv->bo_gart);
                        if (ret)
index e5a27df0672b5db5d0b21c11f8e46344eb706fa4..4e308eacb27a0c800df134e517753367a8ddf9f5 100644 (file)
@@ -35,6 +35,7 @@
 #define GK104_DISP                                                   0x00009170
 #define GK110_DISP                                                   0x00009270
 #define GM107_DISP                                                   0x00009470
+#define GM204_DISP                                                   0x00009570
 
 #define NV50_DISP_CURSOR                                             0x0000507a
 #define G82_DISP_CURSOR                                              0x0000827a
@@ -65,6 +66,7 @@
 #define GK104_DISP_CORE_CHANNEL_DMA                                  0x0000917d
 #define GK110_DISP_CORE_CHANNEL_DMA                                  0x0000927d
 #define GM107_DISP_CORE_CHANNEL_DMA                                  0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA                                  0x0000957d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                                0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                                 0x0000827e
@@ -131,6 +133,7 @@ struct nv_device_v0 {
 #define NV_DEVICE_V0_DISABLE_COPY1                        0x0000010000000000ULL
 #define NV_DEVICE_V0_DISABLE_VIC                          0x0000020000000000ULL
 #define NV_DEVICE_V0_DISABLE_VENC                         0x0000040000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY2                        0x0000080000000000ULL
        __u64 disable;  /* disable particular subsystems */
        __u64 debug0;   /* as above, but *internal* ids, and *NOT* ABI */
 };
index 3c4df1fc26dce053ad8c2408cc8ba42906ae57d6..3f7ac5bc8e03990426c70a2e3c17622fe9edfb53 100644 (file)
@@ -62,6 +62,7 @@ nvif_drivers[] = {
 #else
        &nvif_driver_drm,
        &nvif_driver_lib,
+       &nvif_driver_null,
 #endif
        NULL
 };
index ac4bdb3ea506c518cf87eefc3c2e793cbc8bc23f..8bd39e69229c6103b2a811499ebb3023d29bc0ec 100644 (file)
@@ -17,5 +17,6 @@ struct nvif_driver {
 extern const struct nvif_driver nvif_driver_nvkm;
 extern const struct nvif_driver nvif_driver_drm;
 extern const struct nvif_driver nvif_driver_lib;
+extern const struct nvif_driver nvif_driver_null;
 
 #endif
index 2d28dc337cfb4ed50104370478021738de688316..b0566a1ca28f3d59c228d9c500b4004ab44895ac 100644 (file)
@@ -20,6 +20,7 @@
 #include "omap_drv.h"
 
 #include <drm/drm_mode.h>
+#include <drm/drm_plane_helper.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
index e4849413ee80074616d6b6239cea6f0b1b16c5a3..aeb91ed653c9f8d2e410a6b95ed7e194168ef508 100644 (file)
@@ -612,8 +612,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 {
        union omap_gem_size gsize;
 
-       /* in case someone tries to feed us a completely bogus stride: */
-       args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+       args->pitch = align_pitch(0, args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
 
        gsize = (union omap_gem_size){
index 891a4dc608af85f2760ced973c9cf5163743d08f..ee8e2b3a117ef59c183bd4cd63075ea565be98c1 100644 (file)
@@ -388,20 +388,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
        struct drm_plane *plane = NULL;
        struct omap_plane *omap_plane;
        struct omap_overlay_info *info;
-       int ret;
 
        DBG("%s: priv=%d", plane_names[id], private_plane);
 
        omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
        if (!omap_plane)
-               goto fail;
+               return NULL;
 
-       ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+       drm_flip_work_init(&omap_plane->unpin_work,
                        "unpin", unpin_worker);
-       if (ret) {
-               dev_err(dev->dev, "could not allocate unpin FIFO\n");
-               goto fail;
-       }
 
        omap_plane->nformats = omap_framebuffer_get_formats(
                        omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -443,10 +438,4 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
                omap_plane->info.zorder = id;
 
        return plane;
-
-fail:
-       if (plane)
-               omap_plane_destroy(plane);
-
-       return NULL;
 }
index bee9f72b3a93a2cccb99476ac6433c53c2ed04c7..024e98ef8e4d1bcd7be6b70c648969e529c175e5 100644 (file)
@@ -27,4 +27,17 @@ config DRM_PANEL_S6E8AA0
        select DRM_MIPI_DSI
        select VIDEOMODE_HELPERS
 
+config DRM_PANEL_SHARP_LQ101R1SX01
+       tristate "Sharp LQ101R1SX01 panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       help
+         Say Y here if you want to enable support for Sharp LQ101R1SX01
+         TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+         24-bit RGB per pixel. It provides a dual MIPI DSI interface to
+         the host and has a built-in LED backlight.
+
+         To compile this driver as a module, choose M here: the module
+         will be called panel-sharp-lq101r1sx01.
+
 endmenu
index 8b929212fad791e6177d5f50ababb3fa8a1f60aa..4b2a0430804b1813ed02bc713e61590e7a62d849 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
 obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
index 42ac67b21e9fac04d9f0e0599426f0fa48abf4cb..08cf2c588c3df3d4555f72d62d482b51098e5f0e 100644 (file)
@@ -145,7 +145,7 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
        if (ctx->error < 0 || len == 0)
                return;
 
-       dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", len, data);
+       dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
        ret = ld9040_spi_write_word(ctx, *data);
 
        while (!ret && --len) {
@@ -154,8 +154,8 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
        }
 
        if (ret) {
-               dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
-                       data);
+               dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+                       (int)len, data);
                ctx->error = ret;
        }
 
@@ -336,17 +336,12 @@ static int ld9040_probe(struct spi_device *spi)
        if (ret < 0)
                return ret;
 
-       ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+       ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(ctx->reset_gpio)) {
                dev_err(dev, "cannot get reset-gpios %ld\n",
                        PTR_ERR(ctx->reset_gpio));
                return PTR_ERR(ctx->reset_gpio);
        }
-       ret = gpiod_direction_output(ctx->reset_gpio, 1);
-       if (ret < 0) {
-               dev_err(dev, "cannot configure reset-gpios %d\n", ret);
-               return ret;
-       }
 
        spi->bits_per_word = 9;
        ret = spi_setup(spi);
index b5217fe37f02742e87df986280fb69c2097bead9..144b2733e3d70ffd33a71997da4fae29fc8723ec 100644 (file)
@@ -141,10 +141,10 @@ static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
        if (ctx->error < 0)
                return;
 
-       ret = mipi_dsi_dcs_write(dsi, data, len);
+       ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
        if (ret < 0) {
-               dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
-                       data);
+               dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+                       (int)len, data);
                ctx->error = ret;
        }
 }
@@ -800,27 +800,15 @@ static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
 }
 
 static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
-                                                  int size)
+                                                  u16 size)
 {
        struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
-       u8 buf[] = {size, 0};
-       struct mipi_dsi_msg msg = {
-               .channel = dsi->channel,
-               .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
-               .tx_len = sizeof(buf),
-               .tx_buf = buf
-       };
        int ret;
 
        if (ctx->error < 0)
                return;
 
-       if (!ops || !ops->transfer)
-               ret = -EIO;
-       else
-               ret = ops->transfer(dsi->host, &msg);
-
+       ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
        if (ret < 0) {
                dev_err(ctx->dev,
                        "error %d setting maximum return packet size to %d\n",
@@ -1019,17 +1007,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
                return ret;
        }
 
-       ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+       ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(ctx->reset_gpio)) {
                dev_err(dev, "cannot get reset-gpios %ld\n",
                        PTR_ERR(ctx->reset_gpio));
                return PTR_ERR(ctx->reset_gpio);
        }
-       ret = gpiod_direction_output(ctx->reset_gpio, 1);
-       if (ret < 0) {
-               dev_err(dev, "cannot configure reset-gpios %d\n", ret);
-               return ret;
-       }
 
        ctx->brightness = GAMMA_LEVEL_NUM - 1;
 
@@ -1069,7 +1052,6 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
        .remove = s6e8aa0_remove,
        .driver = {
                .name = "panel_s6e8aa0",
-               .owner = THIS_MODULE,
                .of_match_table = s6e8aa0_of_match,
        },
 };
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
new file mode 100644 (file)
index 0000000..9d81759
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include <linux/host1x.h>
+
+struct sharp_panel {
+       struct drm_panel base;
+       /* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
+       struct mipi_dsi_device *link1;
+       struct mipi_dsi_device *link2;
+
+       struct backlight_device *backlight;
+       struct regulator *supply;
+
+       bool prepared;
+       bool enabled;
+
+       const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct sharp_panel, base);
+}
+
+static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
+{
+       u8 payload[3] = { offset >> 8, offset & 0xff, value };
+       struct mipi_dsi_device *dsi = sharp->link1;
+       ssize_t err;
+
+       err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+       if (err < 0) {
+               dev_err(&dsi->dev, "failed to write %02x to %04x: %zd\n",
+                       value, offset, err);
+               return err;
+       }
+
+       err = mipi_dsi_dcs_nop(dsi);
+       if (err < 0) {
+               dev_err(&dsi->dev, "failed to send DCS nop: %zd\n", err);
+               return err;
+       }
+
+       usleep_range(10, 20);
+
+       return 0;
+}
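For reference, sharp_panel_write() packs the 16-bit register offset and the 8-bit value into a three-byte generic packet: the left-right mode write used later in prepare (offset 0x1000, value 0x2a) goes out as { 0x10, 0x00, 0x2a }, high offset byte first, followed by a DCS nop and a 10-20 us settle delay.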
+
+static __maybe_unused int sharp_panel_read(struct sharp_panel *sharp,
+                                          u16 offset, u8 *value)
+{
+       ssize_t err;
+
+       cpu_to_be16s(&offset);
+
+       err = mipi_dsi_generic_read(sharp->link1, &offset, sizeof(offset),
+                                   value, sizeof(*value));
+       if (err < 0)
+               dev_err(&sharp->link1->dev, "failed to read from %04x: %zd\n",
+                       offset, err);
+
+       return err;
+}
+
+static int sharp_panel_disable(struct drm_panel *panel)
+{
+       struct sharp_panel *sharp = to_sharp_panel(panel);
+
+       if (!sharp->enabled)
+               return 0;
+
+       if (sharp->backlight) {
+               sharp->backlight->props.power = FB_BLANK_POWERDOWN;
+               backlight_update_status(sharp->backlight);
+       }
+
+       sharp->enabled = false;
+
+       return 0;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+       struct sharp_panel *sharp = to_sharp_panel(panel);
+       int err;
+
+       if (!sharp->prepared)
+               return 0;
+
+       err = mipi_dsi_dcs_set_display_off(sharp->link1);
+       if (err < 0)
+               dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+       err = mipi_dsi_dcs_enter_sleep_mode(sharp->link1);
+       if (err < 0)
+               dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+       msleep(120);
+
+       regulator_disable(sharp->supply);
+
+       sharp->prepared = false;
+
+       return 0;
+}
+
+static int sharp_setup_symmetrical_split(struct mipi_dsi_device *left,
+                                        struct mipi_dsi_device *right,
+                                        const struct drm_display_mode *mode)
+{
+       int err;
+
+       err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
+       if (err < 0) {
+               dev_err(&left->dev, "failed to set column address: %d\n", err);
+               return err;
+       }
+
+       err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
+       if (err < 0) {
+               dev_err(&left->dev, "failed to set page address: %d\n", err);
+               return err;
+       }
+
+       err = mipi_dsi_dcs_set_column_address(right, mode->hdisplay / 2,
+                                             mode->hdisplay - 1);
+       if (err < 0) {
+               dev_err(&right->dev, "failed to set column address: %d\n", err);
+               return err;
+       }
+
+       err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
+       if (err < 0) {
+               dev_err(&right->dev, "failed to set page address: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
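Concretely, with the driver's default 2560x1600 mode this programs the left link to scan columns 0..1279 and the right link columns 1280..2559, while both links cover the full page range 0..1599.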
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+       struct sharp_panel *sharp = to_sharp_panel(panel);
+       u8 format = MIPI_DCS_PIXEL_FMT_24BIT;
+       int err;
+
+       if (sharp->prepared)
+               return 0;
+
+       err = regulator_enable(sharp->supply);
+       if (err < 0)
+               return err;
+
+       usleep_range(10000, 20000);
+
+       err = mipi_dsi_dcs_soft_reset(sharp->link1);
+       if (err < 0) {
+               dev_err(panel->dev, "soft reset failed: %d\n", err);
+               goto poweroff;
+       }
+
+       msleep(120);
+
+       err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+               goto poweroff;
+       }
+
+       /*
+        * The MIPI DCS specification mandates this delay only between the
+        * exit_sleep_mode and enter_sleep_mode commands, so it isn't strictly
+        * necessary here.
+        */
+       /*
+       msleep(120);
+       */
+
+       /* set left-right mode */
+       err = sharp_panel_write(sharp, 0x1000, 0x2a);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to set left-right mode: %d\n", err);
+               goto poweroff;
+       }
+
+       /* enable command mode */
+       err = sharp_panel_write(sharp, 0x1001, 0x01);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to enable command mode: %d\n", err);
+               goto poweroff;
+       }
+
+       err = mipi_dsi_dcs_set_pixel_format(sharp->link1, format);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to set pixel format: %d\n", err);
+               goto poweroff;
+       }
+
+       /*
+        * TODO: The device supports both left-right and even-odd split
+        * configurations, but this driver currently supports only the left-
+        * right split. To support a different mode a mechanism needs to be
+        * put in place to communicate the configuration back to the DSI host
+        * controller.
+        */
+       err = sharp_setup_symmetrical_split(sharp->link1, sharp->link2,
+                                           sharp->mode);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
+                       err);
+               goto poweroff;
+       }
+
+       err = mipi_dsi_dcs_set_display_on(sharp->link1);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to set display on: %d\n", err);
+               goto poweroff;
+       }
+
+       sharp->prepared = true;
+
+       return 0;
+
+poweroff:
+       regulator_disable(sharp->supply);
+       return err;
+}
+
+static int sharp_panel_enable(struct drm_panel *panel)
+{
+       struct sharp_panel *sharp = to_sharp_panel(panel);
+
+       if (sharp->enabled)
+               return 0;
+
+       if (sharp->backlight) {
+               sharp->backlight->props.power = FB_BLANK_UNBLANK;
+               backlight_update_status(sharp->backlight);
+       }
+
+       sharp->enabled = true;
+
+       return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+       .clock = 278000,
+       .hdisplay = 2560,
+       .hsync_start = 2560 + 128,
+       .hsync_end = 2560 + 128 + 64,
+       .htotal = 2560 + 128 + 64 + 64,
+       .vdisplay = 1600,
+       .vsync_start = 1600 + 4,
+       .vsync_end = 1600 + 4 + 8,
+       .vtotal = 1600 + 4 + 8 + 32,
+       .vrefresh = 60,
+};
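As a sanity check on these timings: htotal = 2560 + 128 + 64 + 64 = 2816 and vtotal = 1600 + 4 + 8 + 32 = 1644, so the 278 MHz pixel clock yields 278,000,000 / (2816 * 1644) ≈ 60.05 Hz, consistent with .vrefresh = 60.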
+
+static int sharp_panel_get_modes(struct drm_panel *panel)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(panel->drm, &default_mode);
+       if (!mode) {
+               dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+                       default_mode.hdisplay, default_mode.vdisplay,
+                       default_mode.vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       drm_mode_probed_add(panel->connector, mode);
+
+       panel->connector->display_info.width_mm = 217;
+       panel->connector->display_info.height_mm = 136;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+       .disable = sharp_panel_disable,
+       .unprepare = sharp_panel_unprepare,
+       .prepare = sharp_panel_prepare,
+       .enable = sharp_panel_enable,
+       .get_modes = sharp_panel_get_modes,
+};
+
+static const struct of_device_id sharp_of_match[] = {
+       { .compatible = "sharp,lq101r1sx01", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static int sharp_panel_add(struct sharp_panel *sharp)
+{
+       struct device_node *np;
+       int err;
+
+       sharp->mode = &default_mode;
+
+       sharp->supply = devm_regulator_get(&sharp->link1->dev, "power");
+       if (IS_ERR(sharp->supply))
+               return PTR_ERR(sharp->supply);
+
+       np = of_parse_phandle(sharp->link1->dev.of_node, "backlight", 0);
+       if (np) {
+               sharp->backlight = of_find_backlight_by_node(np);
+               of_node_put(np);
+
+               if (!sharp->backlight)
+                       return -EPROBE_DEFER;
+       }
+
+       drm_panel_init(&sharp->base);
+       sharp->base.funcs = &sharp_panel_funcs;
+       sharp->base.dev = &sharp->link1->dev;
+
+       err = drm_panel_add(&sharp->base);
+       if (err < 0)
+               goto put_backlight;
+
+       return 0;
+
+put_backlight:
+       if (sharp->backlight)
+               put_device(&sharp->backlight->dev);
+
+       return err;
+}
+
+static void sharp_panel_del(struct sharp_panel *sharp)
+{
+       if (sharp->base.dev)
+               drm_panel_remove(&sharp->base);
+
+       if (sharp->backlight)
+               put_device(&sharp->backlight->dev);
+
+       if (sharp->link2)
+               put_device(&sharp->link2->dev);
+}
+
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct mipi_dsi_device *secondary = NULL;
+       struct sharp_panel *sharp;
+       struct device_node *np;
+       int err;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_LPM;
+
+       /* look up DSI-LINK2; only the DSI-LINK1 node carries a "link2" phandle */
+       np = of_parse_phandle(dsi->dev.of_node, "link2", 0);
+       if (np) {
+               secondary = of_find_mipi_dsi_device_by_node(np);
+               of_node_put(np);
+
+               if (!secondary)
+                       return -EPROBE_DEFER;
+       }
+
+       /* register a panel for only the DSI-LINK1 interface */
+       if (secondary) {
+               sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
+               if (!sharp) {
+                       put_device(&secondary->dev);
+                       return -ENOMEM;
+               }
+
+               mipi_dsi_set_drvdata(dsi, sharp);
+
+               sharp->link2 = secondary;
+               sharp->link1 = dsi;
+
+               err = sharp_panel_add(sharp);
+               if (err < 0) {
+                       put_device(&secondary->dev);
+                       return err;
+               }
+       }
+
+       err = mipi_dsi_attach(dsi);
+       if (err < 0) {
+               if (secondary)
+                       sharp_panel_del(sharp);
+
+               return err;
+       }
+
+       return 0;
+}
+
+static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+       int err;
+
+       /* only detach from host for the DSI-LINK2 interface */
+       if (!sharp) {
+               mipi_dsi_detach(dsi);
+               return 0;
+       }
+
+       err = sharp_panel_disable(&sharp->base);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+       drm_panel_detach(&sharp->base);
+       sharp_panel_del(sharp);
+
+       return 0;
+}
+
+static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+       /* nothing to do for DSI-LINK2 */
+       if (!sharp)
+               return;
+
+       sharp_panel_disable(&sharp->base);
+}
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+       .driver = {
+               .name = "panel-sharp-lq101r1sx01",
+               .of_match_table = sharp_of_match,
+       },
+       .probe = sharp_panel_probe,
+       .remove = sharp_panel_remove,
+       .shutdown = sharp_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Sharp LQ101R1SX01 panel driver");
+MODULE_LICENSE("GPL v2");
index 23de22f8c82078a11a4c9a7815c8aa099d74ddf9..c4b6167a8bf33c0189bc83ea298653d2d95fcb2b 100644 (file)
@@ -247,21 +247,14 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
        if (IS_ERR(panel->supply))
                return PTR_ERR(panel->supply);
 
-       panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
+       panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+                                                    GPIOD_OUT_LOW);
        if (IS_ERR(panel->enable_gpio)) {
                err = PTR_ERR(panel->enable_gpio);
                dev_err(dev, "failed to request GPIO: %d\n", err);
                return err;
        }
 
-       if (panel->enable_gpio) {
-               err = gpiod_direction_output(panel->enable_gpio, 0);
-               if (err < 0) {
-                       dev_err(dev, "failed to setup GPIO: %d\n", err);
-                       return err;
-               }
-       }
-
        backlight = of_parse_phandle(dev->of_node, "backlight", 0);
        if (backlight) {
                panel->backlight = of_find_backlight_by_node(backlight);
@@ -376,6 +369,29 @@ static const struct panel_desc auo_b101xtn01 = {
        },
 };
 
+static const struct drm_display_mode auo_b116xw03_mode = {
+       .clock = 70589,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 40,
+       .hsync_end = 1366 + 40 + 40,
+       .htotal = 1366 + 40 + 40 + 32,
+       .vdisplay = 768,
+       .vsync_start = 768 + 10,
+       .vsync_end = 768 + 10 + 12,
+       .vtotal = 768 + 10 + 12 + 6,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+       .modes = &auo_b116xw03_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 256,
+               .height = 144,
+       },
+};
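The new .bpc field here (and retrofitted to several existing panel descriptors below) records the panel's per-channel colour depth, 6 for 18-bit and 8 for 24-bit panels, so that the connector's display_info can advertise an accurate value.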
+
 static const struct drm_display_mode auo_b133xtn01_mode = {
        .clock = 69500,
        .hdisplay = 1366,
@@ -415,6 +431,7 @@ static const struct drm_display_mode auo_b133htn01_mode = {
 static const struct panel_desc auo_b133htn01 = {
        .modes = &auo_b133htn01_mode,
        .num_modes = 1,
+       .bpc = 6,
        .size = {
                .width = 293,
                .height = 165,
@@ -536,22 +553,92 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
 static const struct panel_desc foxlink_fl500wvr00_a0t = {
        .modes = &foxlink_fl500wvr00_a0t_mode,
        .num_modes = 1,
+       .bpc = 8,
        .size = {
                .width = 108,
                .height = 65,
        },
 };
 
-static const struct drm_display_mode innolux_n116bge_mode = {
+static const struct drm_display_mode hannstar_hsd070pww1_mode = {
+       .clock = 71100,
+       .hdisplay = 1280,
+       .hsync_start = 1280 + 1,
+       .hsync_end = 1280 + 1 + 158,
+       .htotal = 1280 + 1 + 158 + 1,
+       .vdisplay = 800,
+       .vsync_start = 800 + 1,
+       .vsync_end = 800 + 1 + 21,
+       .vtotal = 800 + 1 + 21 + 1,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc hannstar_hsd070pww1 = {
+       .modes = &hannstar_hsd070pww1_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 151,
+               .height = 94,
+       },
+};
+
+static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
+       .clock = 33333,
+       .hdisplay = 800,
+       .hsync_start = 800 + 85,
+       .hsync_end = 800 + 85 + 86,
+       .htotal = 800 + 85 + 86 + 85,
+       .vdisplay = 480,
+       .vsync_start = 480 + 16,
+       .vsync_end = 480 + 16 + 13,
+       .vtotal = 480 + 16 + 13 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc hitachi_tx23d38vm0caa = {
+       .modes = &hitachi_tx23d38vm0caa_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 195,
+               .height = 117,
+       },
+};
+
+static const struct drm_display_mode innolux_g121i1_l01_mode = {
        .clock = 71000,
+       .hdisplay = 1280,
+       .hsync_start = 1280 + 64,
+       .hsync_end = 1280 + 64 + 32,
+       .htotal = 1280 + 64 + 32 + 64,
+       .vdisplay = 800,
+       .vsync_start = 800 + 9,
+       .vsync_end = 800 + 9 + 6,
+       .vtotal = 800 + 9 + 6 + 9,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121i1_l01 = {
+       .modes = &innolux_g121i1_l01_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 261,
+               .height = 163,
+       },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+       .clock = 76420,
        .hdisplay = 1366,
-       .hsync_start = 1366 + 64,
-       .hsync_end = 1366 + 64 + 6,
-       .htotal = 1366 + 64 + 6 + 64,
+       .hsync_start = 1366 + 136,
+       .hsync_end = 1366 + 136 + 30,
+       .htotal = 1366 + 136 + 30 + 60,
        .vdisplay = 768,
        .vsync_start = 768 + 8,
-       .vsync_end = 768 + 8 + 4,
-       .vtotal = 768 + 8 + 4 + 8,
+       .vsync_end = 768 + 8 + 12,
+       .vtotal = 768 + 8 + 12 + 12,
        .vrefresh = 60,
        .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
@@ -642,6 +729,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "auo,b101xtn01",
                .data = &auo_b101xtn01,
+       }, {
+               .compatible = "auo,b116xw03",
+               .data = &auo_b116xw03,
        }, {
                .compatible = "auo,b133htn01",
                .data = &auo_b133htn01,
@@ -666,6 +756,15 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "foxlink,fl500wvr00-a0t",
                .data = &foxlink_fl500wvr00_a0t,
+       }, {
+               .compatible = "hannstar,hsd070pww1",
+               .data = &hannstar_hsd070pww1,
+       }, {
+               .compatible = "hit,tx23d38vm0caa",
+               .data = &hitachi_tx23d38vm0caa
+       }, {
+               .compatible ="innolux,g121i1-l01",
+               .data = &innolux_g121i1_l01
        }, {
                .compatible = "innolux,n116bge",
                .data = &innolux_n116bge,
@@ -741,6 +840,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
        .desc = {
                .modes = &lg_ld070wx3_sl01_mode,
                .num_modes = 1,
+               .bpc = 8,
                .size = {
                        .width = 94,
                        .height = 151,
@@ -768,6 +868,7 @@ static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
        .desc = {
                .modes = &lg_lh500wx1_sd03_mode,
                .num_modes = 1,
+               .bpc = 8,
                .size = {
                        .width = 62,
                        .height = 110,
@@ -795,6 +896,7 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
        .desc = {
                .modes = &panasonic_vvx10f004b00_mode,
                .num_modes = 1,
+               .bpc = 8,
                .size = {
                        .width = 217,
                        .height = 136,
@@ -864,7 +966,6 @@ static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
 static struct mipi_dsi_driver panel_simple_dsi_driver = {
        .driver = {
                .name = "panel-simple-dsi",
-               .owner = THIS_MODULE,
                .of_match_table = dsi_of_match,
        },
        .probe = panel_simple_dsi_probe,
index 0d13962668578748af69ab19827c6f90598a36bf..4a0a8b29b0a1e070ab0ce4e7574def7a73ab7383 100644 (file)
@@ -29,6 +29,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 #include "drm_crtc_helper.h"
+#include <drm/drm_plane_helper.h>
 
 static bool qxl_head_enabled(struct qxl_head *head)
 {
@@ -100,14 +101,37 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
        return 0;
 }
 
+static void qxl_update_offset_props(struct qxl_device *qdev)
+{
+       struct drm_device *dev = qdev->ddev;
+       struct drm_connector *connector;
+       struct qxl_output *output;
+       struct qxl_head *head;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               output = drm_connector_to_qxl_output(connector);
+
+               head = &qdev->client_monitors_config->heads[output->index];
+
+               drm_object_property_set_value(&connector->base,
+                       dev->mode_config.suggested_x_property, head->x);
+               drm_object_property_set_value(&connector->base,
+                       dev->mode_config.suggested_y_property, head->y);
+       }
+}
+
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 {
 
+       struct drm_device *dev = qdev->ddev;
        while (qxl_display_copy_rom_client_monitors_config(qdev)) {
                qxl_io_log(qdev, "failed crc check for client_monitors_config,"
                                 " retrying\n");
        }
 
+       drm_modeset_lock_all(dev);
+       qxl_update_offset_props(qdev);
+       drm_modeset_unlock_all(dev);
        if (!drm_helper_hpd_irq_event(qdev->ddev)) {
                /* notify that the monitor configuration changed, to
                   adjust at the arbitrary resolution */
@@ -568,7 +592,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct qxl_device *qdev = dev->dev_private;
-       struct qxl_mode *m = (void *)mode->private;
        struct qxl_framebuffer *qfb;
        struct qxl_bo *bo, *old_bo = NULL;
        struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
@@ -586,12 +609,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
        }
        qfb = to_qxl_framebuffer(crtc->primary->fb);
        bo = gem_to_qxl_bo(qfb->obj);
-       if (!m)
-               /* and do we care? */
-               DRM_DEBUG("%dx%d: not a native mode\n", x, y);
-       else
-               DRM_DEBUG("%dx%d: qxl id %d\n",
-                         mode->hdisplay, mode->vdisplay, m->id);
        DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
                  x, y,
                  mode->hdisplay, mode->vdisplay,
@@ -951,6 +968,10 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 
        drm_object_attach_property(&connector->base,
                                   qdev->hotplug_mode_update_property, 0);
+       drm_object_attach_property(&connector->base,
+                                  dev->mode_config.suggested_x_property, 0);
+       drm_object_attach_property(&connector->base,
+                                  dev->mode_config.suggested_y_property, 0);
        drm_connector_register(connector);
        return 0;
 }
@@ -1064,6 +1085,7 @@ int qxl_modeset_init(struct qxl_device *qdev)
 
        qdev->ddev->mode_config.fb_base = qdev->vram_base;
 
+       drm_mode_create_suggested_offset_properties(qdev->ddev);
        qxl_mode_create_hotplug_mode_update_property(qdev);
 
        for (i = 0 ; i < qxl_num_crtc; ++i) {
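The suggested-offset hints attached above are ordinary immutable connector properties, so userspace can read them back with stock libdrm. A minimal sketch, assuming the property names created by the drm_mode_create_suggested_offset_properties() helper ("suggested X" / "suggested Y"); compile against libdrm:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <xf86drmMode.h>

void print_suggested_offsets(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;
		/* immutable range properties, filled from the client monitors
		 * config by qxl_update_offset_props() */
		if (!strcmp(prop->name, "suggested X") ||
		    !strcmp(prop->name, "suggested Y"))
			printf("%s = %" PRIu64 "\n", prop->name,
			       props->prop_values[i]);
		drmModeFreeProperty(prop);
	}
	drmModeObjectFreeProperties(props);
}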
index 446e71ca36cb111c5d47aad5b4928eb6548a68c5..d9b25684ac989d73341b7cddd70bb181b28eee2b 100644 (file)
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
        if (list_is_singular(&release->bos))
                return 0;
 
-       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+                                    !no_intr, NULL);
        if (ret)
                return ret;
 
index 575e986f82a76239113e2e122b42ce961a24e3e0..8fd2d9f58f770a3de4b7fc2b840b1e788c5aafdb 100644 (file)
@@ -905,7 +905,7 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
 
-       mask_size = depth->n * sizeof(u8);
+       mask_size = depth->n;
        if (depth->mask) {
                mask = memdup_user(depth->mask, mask_size);
                if (IS_ERR(mask)) {
@@ -1010,7 +1010,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
        }
 
        if (depth->mask) {
-               mask_size = depth->n * sizeof(u8);
+               mask_size = depth->n;
                mask = memdup_user(depth->mask, mask_size);
                if (IS_ERR(mask)) {
                        kfree(x);
index d01b87991422588c24b8f477a1050641cb683aa1..12bc21219a0ea13f305133139ba9a4afe9dcf618 100644 (file)
@@ -80,7 +80,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
        rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
        trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
-       ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
+       ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o \
+       radeon_sync.o
 
 # add async DMA block
 radeon-y += \
@@ -104,6 +105,7 @@ radeon-y += \
        radeon_vce.o \
        vce_v1_0.o \
        vce_v2_0.o \
+       radeon_kfd.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
index 15da7ef344a4e55fa11d604c21269e97ab361386..ec1593a6a561b550d873033d1c9cd7c9e6974af4 100644 (file)
@@ -1217,7 +1217,7 @@ free:
        return ret;
 }
 
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
 {
        int r;
 
@@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
        return r;
 }
 
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+       int r;
+       mutex_lock(&ctx->scratch_mutex);
+       r = atom_execute_table_scratch_unlocked(ctx, index, params);
+       mutex_unlock(&ctx->scratch_mutex);
+       return r;
+}
+
 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
 
 static void atom_index_iio(struct atom_context *ctx, int base)
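The point of splitting out the _scratch_unlocked variant is that the AUX and I2C paths further down can hold scratch_mutex across a whole transaction: execute the table, then read the reply out of the shared scratch buffer before anyone else can overwrite it. A minimal userspace sketch of the same locking pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scratch_mutex = PTHREAD_MUTEX_INITIALIZER;
static int scratch[4];		/* stands in for the shared scratch buffer */

static int execute_table_scratch_unlocked(int index)
{
	scratch[0] = index;	/* the "table" writes its reply into scratch */
	return 0;
}

/* ordinary callers: lock around a single execution */
static int execute_table(int index)
{
	int r;

	pthread_mutex_lock(&scratch_mutex);
	r = execute_table_scratch_unlocked(index);
	pthread_mutex_unlock(&scratch_mutex);
	return r;
}

int main(void)
{
	int r;

	/* transaction-style caller: keep the lock until the reply is read */
	pthread_mutex_lock(&scratch_mutex);
	r = execute_table_scratch_unlocked(42);
	if (r == 0)
		printf("reply = %d\n", scratch[0]);
	pthread_mutex_unlock(&scratch_mutex);

	return execute_table(7);
}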
index feba6b8d36b346640ba09f19e571e069cb575f8f..6d014ddb6b7837f035bb332a9b9d12b22256c741 100644 (file)
@@ -125,6 +125,7 @@ struct card_info {
 struct atom_context {
        struct card_info *card;
        struct mutex mutex;
+       struct mutex scratch_mutex;
        void *bios;
        uint32_t cmd_table, data_table;
        uint16_t *iio;
@@ -145,6 +146,7 @@ extern int atom_debug;
 
 struct atom_context *atom_parse(struct card_info *, void *);
 int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
 int atom_asic_init(struct atom_context *);
 void atom_destroy(struct atom_context *);
 bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
index 30d242b25078e1f5c35d63384961db4279c7ffd4..d59ec491dbb9cba64d76369e62ef61b836c094d8 100644 (file)
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        atombios_crtc_set_base(crtc, x, y, old_fb);
        atombios_overscan_setup(crtc, mode, adjusted_mode);
        atombios_scaler_setup(crtc);
+       radeon_cursor_reset(crtc);
        /* update the hw version fpr dpm */
        radeon_crtc->hw_mode = *adjusted_mode;
 
index 95d5d4ab3335edd2c86146c487c9cbd7bc269182..11ba9d21b89b608788f623822bcfb6f9f14dbbf1 100644 (file)
@@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
        memset(&args, 0, sizeof(args));
 
        mutex_lock(&chan->mutex);
+       mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
 
        base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
@@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
        if (ASIC_IS_DCE4(rdev))
                args.v2.ucHPD_ID = chan->rec.hpd;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
        *ack = args.v1.ucReplyStatus;
 
@@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 
        r = recv_bytes;
 done:
+       mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
        mutex_unlock(&chan->mutex);
 
        return r;
index 9c570fb15b8c2e49a9765787d26a58a1467c8e0e..4157780585a0e8e89811e97349d99eb7bf026202 100644 (file)
@@ -48,6 +48,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
        memset(&args, 0, sizeof(args));
 
        mutex_lock(&chan->mutex);
+       mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
 
        base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
@@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
        args.ucSlaveAddr = slave_addr << 1;
        args.ucLineNumber = chan->rec.i2c_id;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
        /* error */
        if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
                radeon_atom_copy_swap(buf, base, num, false);
 
 done:
+       mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
        mutex_unlock(&chan->mutex);
 
        return r;
index 11a55e9dad7fef5d04ae838a6b7ae2c2f6b04958..f373a81ba3d5f8f969bf810d3d373decf3889170 100644 (file)
 static const struct ci_pt_defaults defaults_hawaii_xt =
 {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
-       { 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
-       { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
 static const struct ci_pt_defaults defaults_hawaii_pro =
 {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
-       { 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
-       { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
 static const struct ci_pt_defaults defaults_bonaire_xt =
@@ -184,6 +184,9 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
                                       u32 target_tdp);
 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
 
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+                                                     PPSMC_Msg msg, u32 parameter);
+
 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
 {
         struct ci_power_info *pi = rdev->pm.dpm.priv;
@@ -249,7 +252,10 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
 
        if (pi->caps_power_containment) {
                pi->caps_cac = true;
-               pi->enable_bapm_feature = true;
+               if (rdev->family == CHIP_HAWAII)
+                       pi->enable_bapm_feature = false;
+               else
+                       pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
@@ -352,6 +358,21 @@ static int ci_populate_dw8(struct radeon_device *rdev)
        return 0;
 }
 
+static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+
+       if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+           (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
+               rdev->pm.dpm.fan.fan_output_sensitivity =
+                       rdev->pm.dpm.fan.default_fan_output_sensitivity;
+
+       pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+               cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
+
+       return 0;
+}
+
 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
 {
        struct ci_power_info *pi = ci_get_pi(rdev);
@@ -475,6 +496,9 @@ static int ci_populate_pm_base(struct radeon_device *rdev)
                if (ret)
                        return ret;
                ret = ci_populate_dw8(rdev);
+               if (ret)
+                       return ret;
+               ret = ci_populate_fuzzy_fan(rdev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
@@ -690,6 +714,25 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
        return ret;
 }
 
+static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
+                                           bool enable)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+       PPSMC_Result smc_result = PPSMC_Result_OK;
+
+       if (pi->thermal_sclk_dpm_enabled) {
+               if (enable)
+                       smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+               else
+                       smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+       }
+
+       if (smc_result == PPSMC_Result_OK)
+               return 0;
+       else
+               return -EINVAL;
+}
+
 static int ci_power_control_set_level(struct radeon_device *rdev)
 {
        struct ci_power_info *pi = ci_get_pi(rdev);
@@ -700,13 +743,11 @@ static int ci_power_control_set_level(struct radeon_device *rdev)
        int ret = 0;
        bool adjust_polarity = false; /* ??? */
 
-       if (pi->caps_power_containment &&
-           (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+       if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;
-               target_tdp *= 256;
 
                ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
        }
@@ -814,7 +855,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
        }
 }
 
-static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
                                            int min_temp, int max_temp)
 {
        int low_temp = 0 * 1000;
@@ -850,6 +891,350 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
        return 0;
 }
 
+static int ci_thermal_enable_alert(struct radeon_device *rdev,
+                                  bool enable)
+{
+       u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
+       PPSMC_Result result;
+
+       if (enable) {
+               thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+               WREG32_SMC(CG_THERMAL_INT, thermal_int);
+               rdev->irq.dpm_thermal = false;
+               result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
+               if (result != PPSMC_Result_OK) {
+                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+                       return -EINVAL;
+               }
+       } else {
+               thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+               WREG32_SMC(CG_THERMAL_INT, thermal_int);
+               rdev->irq.dpm_thermal = true;
+               result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
+               if (result != PPSMC_Result_OK) {
+                       DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+       u32 tmp;
+
+       if (pi->fan_ctrl_is_in_default_mode) {
+               tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+               pi->fan_ctrl_default_mode = tmp;
+               tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+               pi->t_min = tmp;
+               pi->fan_ctrl_is_in_default_mode = false;
+       }
+
+       tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+       tmp |= TMIN(0);
+       WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+       tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+       tmp |= FDO_PWM_MODE(mode);
+       WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+       SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       u32 duty100;
+       u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       u16 fdo_min, slope1, slope2;
+       u32 reference_clock, tmp;
+       int ret;
+       u64 tmp64;
+
+       if (!pi->fan_table_start) {
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0) {
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (u16)tmp64;
+
+       t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+       t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+       pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+       pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+       slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = radeon_get_xclk(rdev);
+
+       fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+                                              reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+       tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+       fan_table.TempSrc = (uint8_t)tmp;
+
+       ret = ci_copy_bytes_to_smc(rdev,
+                                  pi->fan_table_start,
+                                  (u8 *)(&fan_table),
+                                  sizeof(fan_table),
+                                  pi->sram_end);
+
+       if (ret) {
+               DRM_ERROR("Failed to load fan table to the SMC.");
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+       }
+
+       return 0;
+}
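To make the fixed-point conversions above concrete, here is a standalone sketch with made-up inputs (real boards take these from the VBIOS fan profile): pwm_* values are in 0.01% units, t_* values in 0.01 degC units, and duty100 is the FMAX_DUTY100 field read back from CG_FDO_CTRL1.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;		/* hypothetical FMAX_DUTY100 */
	uint64_t pwm_min = 1000;	/* 10.00 % */
	uint32_t pwm_diff1 = 3000;	/* pwm_med - pwm_min = 30.00 % */
	uint32_t t_diff1 = 2000;	/* t_med - t_min = 20.00 degC */

	/* fdo_min: minimum duty cycle, scaled from 0.01 % to duty100 units */
	uint16_t fdo_min = (uint16_t)(pwm_min * duty100 / 10000);

	/* slope1: same rounding as the driver, (50 + ...) / 100 */
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) /
					    t_diff1)) / 100);

	printf("fdo_min = %u, slope1 = %u\n", fdo_min, slope1);	/* 25, 61 */
	return 0;
}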
+
+static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+       PPSMC_Result ret;
+
+       if (pi->caps_od_fuzzy_fan_control_support) {
+               ret = ci_send_msg_to_smc_with_parameter(rdev,
+                                                       PPSMC_StartFanControl,
+                                                       FAN_CONTROL_FUZZY);
+               if (ret != PPSMC_Result_OK)
+                       return -EINVAL;
+               ret = ci_send_msg_to_smc_with_parameter(rdev,
+                                                       PPSMC_MSG_SetFanPwmMax,
+                                                       rdev->pm.dpm.fan.default_max_fan_pwm);
+               if (ret != PPSMC_Result_OK)
+                       return -EINVAL;
+       } else {
+               ret = ci_send_msg_to_smc_with_parameter(rdev,
+                                                       PPSMC_StartFanControl,
+                                                       FAN_CONTROL_TABLE);
+               if (ret != PPSMC_Result_OK)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+#if 0
+static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+       PPSMC_Result ret;
+
+       ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+       if (ret == PPSMC_Result_OK)
+               return 0;
+       else
+               return -EINVAL;
+}
+
+static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+                                            u32 *speed)
+{
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+       duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)duty * 100;
+       do_div(tmp64, duty100);
+       *speed = (u32)tmp64;
+
+       if (*speed > 100)
+               *speed = 100;
+
+       return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+                                            u32 speed)
+{
+       u32 tmp;
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (speed > 100)
+               return -EINVAL;
+
+       if (rdev->pm.dpm.fan.ucode_fan_control)
+               ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+       duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (u32)tmp64;
+
+       tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+       tmp |= FDO_STATIC_DUTY(duty);
+       WREG32_SMC(CG_FDO_CTRL0, tmp);
+
+       ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+       return 0;
+}
+
+static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+                                        u32 *speed)
+{
+       u32 tach_period;
+       u32 xclk = radeon_get_xclk(rdev);
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (rdev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+       if (tach_period == 0)
+               return -ENOENT;
+
+       *speed = 60 * xclk * 10000 / tach_period;
+
+       return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+                                        u32 speed)
+{
+       u32 tach_period, tmp;
+       u32 xclk = radeon_get_xclk(rdev);
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (rdev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       if ((speed < rdev->pm.fan_min_rpm) ||
+           (speed > rdev->pm.fan_max_rpm))
+               return -EINVAL;
+
+       if (rdev->pm.dpm.fan.ucode_fan_control)
+               ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+       tach_period = 60 * xclk * 10000 / (8 * speed);
+       tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+       tmp |= TARGET_PERIOD(tach_period);
+       WREG32_SMC(CG_TACH_CTRL, tmp);
+
+       ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+       return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+       struct ci_power_info *pi = ci_get_pi(rdev);
+       u32 tmp;
+
+       if (!pi->fan_ctrl_is_in_default_mode) {
+               tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+               tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
+               WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+               tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+               tmp |= TMIN(pi->t_min);
+               WREG32_SMC(CG_FDO_CTRL2, tmp);
+               pi->fan_ctrl_is_in_default_mode = true;
+       }
+}
+
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+       if (rdev->pm.dpm.fan.ucode_fan_control) {
+               ci_fan_ctrl_start_smc_fan_control(rdev);
+               ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+       }
+}
+
+static void ci_thermal_initialize(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       if (rdev->pm.fan_pulses_per_revolution) {
+               tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+               tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+               WREG32_SMC(CG_TACH_CTRL, tmp);
+       }
+
+       tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+       tmp |= TACH_PWM_RESP_RATE(0x28);
+       WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+       int ret;
+
+       ci_thermal_initialize(rdev);
+       ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = ci_thermal_enable_alert(rdev, true);
+       if (ret)
+               return ret;
+       if (rdev->pm.dpm.fan.ucode_fan_control) {
+               ret = ci_thermal_setup_fan_table(rdev);
+               if (ret)
+                       return ret;
+               ci_thermal_start_smc_fan_control(rdev);
+       }
+
+       return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+       if (!rdev->pm.no_fan)
+               ci_fan_ctrl_set_default_mode(rdev);
+}
+
 #if 0
 static int ci_read_smc_soft_register(struct radeon_device *rdev,
                                     u16 reg_offset, u32 *value)
@@ -1253,7 +1638,7 @@ static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
 
        if (!pi->sclk_dpm_key_disabled) {
                PPSMC_Result smc_result =
-                       ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+                       ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }
@@ -1267,7 +1652,7 @@ static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
 
        if (!pi->mclk_dpm_key_disabled) {
                PPSMC_Result smc_result =
-                       ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+                       ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }
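
Forcing a level in the two hunks above is no longer a ForceState index but a
one-hot SetEnabledMask, which leaves the SMC exactly one selectable level. A
tiny stand-alone illustration of the resulting mask values:

#include <stdio.h>

int main(void)
{
        for (unsigned int n = 0; n < 4; n++)
                printf("force level %u -> mask 0x%x\n", n, 1u << n);
        return 0;       /* masks 0x1, 0x2, 0x4, 0x8 */
}
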
@@ -2042,6 +2427,33 @@ static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
        return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
 }
 
+static void ci_register_patching_mc_arb(struct radeon_device *rdev,
+                                       const u32 engine_clock,
+                                       const u32 memory_clock,
+                                       u32 *dram_timing2)
+{
+       bool patch;
+       u32 tmp, tmp2;
+
+       tmp = RREG32(MC_SEQ_MISC0);
+       patch = ((tmp & 0x0000f00) == 0x300);
+
+       if (patch &&
+           ((rdev->pdev->device == 0x67B0) ||
+            (rdev->pdev->device == 0x67B1))) {
+               if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+                       tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+                       *dram_timing2 &= ~0x00ff0000;
+                       *dram_timing2 |= tmp2 << 16;
+               } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+                       tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+                       *dram_timing2 &= ~0x00ff0000;
+                       *dram_timing2 |= tmp2 << 16;
+               }
+       }
+}
+
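
A worked example of the patch above, which rewrites bits 23:16 of
DRAM_TIMING2 on devices 0x67B0/0x67B1; clocks are in 10 kHz units as
elsewhere in this driver, and the starting register value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t engine_clock = 100000;         /* 1.0 GHz sclk */
        uint32_t dram_timing2 = 0x00aa5500;     /* illustrative register value */
        uint32_t tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;

        dram_timing2 = (dram_timing2 & ~0x00ff0000) | (tmp2 << 16);
        printf("tmp2=0x%02x dram_timing2=0x%08x\n", tmp2, dram_timing2);
        return 0;       /* tmp2=0x26 dram_timing2=0x00265500 */
}
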
 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
                                                u32 sclk,
                                                u32 mclk,
@@ -2057,6 +2469,8 @@ static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
        dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
        burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
 
+       ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
+
        arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
        arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
        arb_regs->McArbBurstTime = (u8)burst_time;
@@ -2351,10 +2765,10 @@ static int ci_calculate_mclk_params(struct radeon_device *rdev,
                u32 tmp;
                u32 reference_clock = rdev->clock.mpll.reference_freq;
 
-               if (pi->mem_gddr5)
-                       freq_nom = memory_clock * 4;
+               if (mpll_param.qdr == 1)
+                       freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
                else
-                       freq_nom = memory_clock * 2;
+                       freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
 
                tmp = (freq_nom / reference_clock);
                tmp = tmp * tmp;
@@ -2434,7 +2848,6 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
                                                      &memory_level->MinVddcPhases);
 
        memory_level->EnabledForThrottle = 1;
-       memory_level->EnabledForActivity = 1;
        memory_level->UpH = 0;
        memory_level->DownH = 100;
        memory_level->VoltageDownH = 0;
@@ -2767,7 +3180,6 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
 
        graphic_level->CcPwrDynRm = 0;
        graphic_level->CcPwrDynRm1 = 0;
-       graphic_level->EnabledForActivity = 1;
        graphic_level->EnabledForThrottle = 1;
        graphic_level->UpH = 0;
        graphic_level->DownH = 0;
@@ -2816,10 +3228,13 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
                                                       &pi->smc_state_table.GraphicsLevel[i]);
                if (ret)
                        return ret;
+               if (i > 1)
+                       pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
                if (i == (dpm_table->sclk_table.count - 1))
                        pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
                                PPSMC_DISPLAY_WATERMARK_HIGH;
        }
+       pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
 
        pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
        pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -2863,6 +3278,16 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
                        return ret;
        }
 
+       pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+       if ((dpm_table->mclk_table.count >= 2) &&
+           ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+               pi->smc_state_table.MemoryLevel[1].MinVddc =
+                       pi->smc_state_table.MemoryLevel[0].MinVddc;
+               pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+                       pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+       }
+
        pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
 
        pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
@@ -2919,9 +3344,14 @@ static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
                                  &pi->dpm_table.pcie_speed_table,
                                  SMU7_MAX_LEVELS_LINK);
 
-       ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
-                                 pi->pcie_gen_powersaving.min,
-                                 pi->pcie_lane_powersaving.min);
+       if (rdev->family == CHIP_BONAIRE)
+               ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+                                         pi->pcie_gen_powersaving.min,
+                                         pi->pcie_lane_powersaving.max);
+       else
+               ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+                                         pi->pcie_gen_powersaving.min,
+                                         pi->pcie_lane_powersaving.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.min);
@@ -2988,19 +3418,21 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
                     allowed_sclk_vddc_table->entries[i].clk)) {
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
                                allowed_sclk_vddc_table->entries[i].clk;
-                       pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+                       pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+                               (i == 0);
                        pi->dpm_table.sclk_table.count++;
                }
        }
 
        pi->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_mclk_table->count; i++) {
-               if ((i==0) ||
+               if ((i == 0) ||
                    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
                     allowed_mclk_table->entries[i].clk)) {
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
                                allowed_mclk_table->entries[i].clk;
-                       pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+                       pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+                               (i == 0);
                        pi->dpm_table.mclk_table.count++;
                }
        }
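
After this change only level 0 of each default table starts out enabled; the
remaining levels are turned on later through the *_SetEnabledMask messages
once the enable masks are derived from the per-level flags. A sketch of that
derivation, with stand-in struct and field names rather than the driver's:

#include <stdio.h>
#include <stdint.h>

struct dpm_level { uint32_t value; int enabled; };

static uint32_t enable_mask(const struct dpm_level *lv, unsigned int count)
{
        uint32_t mask = 0;

        for (unsigned int i = 0; i < count; i++)
                if (lv[i].enabled)
                        mask |= 1u << i;
        return mask;
}

int main(void)
{
        struct dpm_level sclk[3] = { {30000, 1}, {60000, 0}, {100000, 0} };

        printf("0x%x\n", enable_mask(sclk, 3)); /* 0x1: only level 0 at boot */
        return 0;
}
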
@@ -3166,7 +3598,7 @@ static int ci_init_smc_table(struct radeon_device *rdev)
        table->VddcVddciDelta = 4000;
        table->PhaseResponseTime = 0;
        table->MemoryThermThrottleEnable = 1;
-       table->PCIeBootLinkLevel = 0;
+       table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
        table->PCIeGenInterval = 1;
        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
                table->SVI2Enable  = 1;
@@ -3320,6 +3752,8 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
        struct ci_power_info *pi = ci_get_pi(rdev);
        PPSMC_Result result;
 
+       ci_apply_disp_minimum_voltage_request(rdev);
+
        if (!pi->sclk_dpm_key_disabled) {
                if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
                        result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3339,7 +3773,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
                                return -EINVAL;
                }
        }
-
+#if 0
        if (!pi->pcie_dpm_key_disabled) {
                if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
                        result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3349,9 +3783,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
                                return -EINVAL;
                }
        }
-
-       ci_apply_disp_minimum_voltage_request(rdev);
-
+#endif
        return 0;
 }
 
@@ -3377,7 +3809,7 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
                /* XXX check display min clock requirements */
-               if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+               if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }
 
@@ -3707,62 +4139,61 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
                                   enum radeon_dpm_forced_level level)
 {
        struct ci_power_info *pi = ci_get_pi(rdev);
-       PPSMC_Result smc_result;
        u32 tmp, levels, i;
        int ret;
 
        if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
-               if ((!pi->sclk_dpm_key_disabled) &&
-                   pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+               if ((!pi->pcie_dpm_key_disabled) &&
+                   pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
                        levels = 0;
-                       tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+                       tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
                        while (tmp >>= 1)
                                levels++;
                        if (levels) {
-                               ret = ci_dpm_force_state_sclk(rdev, levels);
+                               ret = ci_dpm_force_state_pcie(rdev, levels);
                                if (ret)
                                        return ret;
                                for (i = 0; i < rdev->usec_timeout; i++) {
-                                       tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
-                                              CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+                                       tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+                                              CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
                                        if (tmp == levels)
                                                break;
                                        udelay(1);
                                }
                        }
                }
-               if ((!pi->mclk_dpm_key_disabled) &&
-                   pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+               if ((!pi->sclk_dpm_key_disabled) &&
+                   pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
                        levels = 0;
-                       tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+                       tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
                        while (tmp >>= 1)
                                levels++;
                        if (levels) {
-                               ret = ci_dpm_force_state_mclk(rdev, levels);
+                               ret = ci_dpm_force_state_sclk(rdev, levels);
                                if (ret)
                                        return ret;
                                for (i = 0; i < rdev->usec_timeout; i++) {
                                        tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
-                                              CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+                                              CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
                                        if (tmp == levels)
                                                break;
                                        udelay(1);
                                }
                        }
                }
-               if ((!pi->pcie_dpm_key_disabled) &&
-                   pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+               if ((!pi->mclk_dpm_key_disabled) &&
+                   pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
                        levels = 0;
-                       tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+                       tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
                        while (tmp >>= 1)
                                levels++;
                        if (levels) {
-                               ret = ci_dpm_force_state_pcie(rdev, level);
+                               ret = ci_dpm_force_state_mclk(rdev, levels);
                                if (ret)
                                        return ret;
                                for (i = 0; i < rdev->usec_timeout; i++) {
-                                       tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
-                                              CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+                                       tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+                                              CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
                                        if (tmp == levels)
                                                break;
                                        udelay(1);
@@ -3816,21 +4247,17 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
                        }
                }
        } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
-               if (!pi->sclk_dpm_key_disabled) {
-                       smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
-                       if (smc_result != PPSMC_Result_OK)
-                               return -EINVAL;
-               }
-               if (!pi->mclk_dpm_key_disabled) {
-                       smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
-                       if (smc_result != PPSMC_Result_OK)
-                               return -EINVAL;
-               }
                if (!pi->pcie_dpm_key_disabled) {
-                       smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+                       PPSMC_Result smc_result;
+
+                       smc_result = ci_send_msg_to_smc(rdev,
+                                                       PPSMC_MSG_PCIeDPM_UnForceLevel);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }
+               ret = ci_upload_dpm_level_enable_mask(rdev);
+               if (ret)
+                       return ret;
        }
 
        rdev->pm.dpm.forced_level = level;
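
The reordered HIGH branch above forces PCIe first, then sclk, then mclk, and
each domain then polls until the SMC reports the forced index. The shared
wait shape, condensed into a sketch where status_reg, index_mask and
index_shift stand in for the per-domain register and fields:

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = (RREG32_SMC(status_reg) & index_mask) >> index_shift;
                if (tmp == levels)
                        break;
                udelay(1);
        }
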
@@ -4036,6 +4463,96 @@ static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
        return 0;
 }
 
+static int ci_register_patching_mc_seq(struct radeon_device *rdev,
+                                      struct ci_mc_reg_table *table)
+{
+       u8 i, k;
+       u32 tmp;
+       bool patch;
+
+       tmp = RREG32(MC_SEQ_MISC0);
+       patch = ((tmp & 0x0000f00) == 0x300);
+
+       if (patch &&
+           ((rdev->pdev->device == 0x67B0) ||
+            (rdev->pdev->device == 0x67B1))) {
+               for (i = 0; i < table->last; i++) {
+                       if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+                       switch (table->mc_reg_address[i].s1 >> 2) {
+                       case MC_SEQ_MISC1:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+                                           (table->mc_reg_table_entry[k].mclk_max == 137500))
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+                                                       0x00000007;
+                               }
+                               break;
+                       case MC_SEQ_WR_CTL_D0:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+                                           (table->mc_reg_table_entry[k].mclk_max == 137500))
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+                                                       0x0000D0DD;
+                               }
+                               break;
+                       case MC_SEQ_WR_CTL_D1:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+                                           (table->mc_reg_table_entry[k].mclk_max == 137500))
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+                                                       0x0000D0DD;
+                               }
+                               break;
+                       case MC_SEQ_WR_CTL_2:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+                                           (table->mc_reg_table_entry[k].mclk_max == 137500))
+                                               table->mc_reg_table_entry[k].mc_data[i] = 0;
+                               }
+                               break;
+                       case MC_SEQ_CAS_TIMING:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if (table->mc_reg_table_entry[k].mclk_max == 125000)
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+                                                       0x000C0140;
+                                       else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+                                                       0x000C0150;
+                               }
+                               break;
+                       case MC_SEQ_MISC_TIMING:
+                               for (k = 0; k < table->num_entries; k++) {
+                                       if (table->mc_reg_table_entry[k].mclk_max == 125000)
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+                                                       0x00000030;
+                                       else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+                                               table->mc_reg_table_entry[k].mc_data[i] =
+                                                       (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+                                                       0x00000035;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               }
+
+               WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+               tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
+               tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+               WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+               WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
+       }
+
+       return 0;
+}
+
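
Every case in the function above is the same read-modify-write: clear the
field with a keep mask, then OR in the per-speed override. Reduced to a
stand-alone helper, using the CAS-timing values as an example:

#include <stdio.h>
#include <stdint.h>

static uint32_t rmw(uint32_t reg, uint32_t keep_mask, uint32_t set_bits)
{
        return (reg & keep_mask) | set_bits;
}

int main(void)
{
        /* CAS timing override at mclk_max == 125000 */
        printf("0x%08x\n", rmw(0x12345678, 0xFFE0FE0F, 0x000C0140));
        return 0;
}
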
 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
 {
        struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4079,6 +4596,10 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
 
        ci_set_s0_mc_reg_index(ci_table);
 
+       ret = ci_register_patching_mc_seq(rdev, ci_table);
+       if (ret)
+               goto init_mc_done;
+
        ret = ci_set_mc_special_registers(rdev, ci_table);
        if (ret)
                goto init_mc_done;
@@ -4675,36 +5196,51 @@ int ci_dpm_enable(struct radeon_device *rdev)
                return ret;
        }
 
+       ret = ci_power_control_set_level(rdev);
+       if (ret) {
+               DRM_ERROR("ci_power_control_set_level failed\n");
+               return ret;
+       }
+
        ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
+       ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
+       if (ret) {
+               DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+               return ret;
+       }
+
+       ci_thermal_start_thermal_controller(rdev);
+
        ci_update_current_ps(rdev, boot_ps);
 
        return 0;
 }
 
-int ci_dpm_late_enable(struct radeon_device *rdev)
+static int ci_set_temperature_range(struct radeon_device *rdev)
 {
        int ret;
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-#if 0
-               PPSMC_Result result;
-#endif
-               ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret) {
-                       DRM_ERROR("ci_set_thermal_temperature_range failed\n");
-                       return ret;
-               }
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-#if 0
-               result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+       ret = ci_thermal_enable_alert(rdev, false);
+       if (ret)
+               return ret;
+       ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = ci_thermal_enable_alert(rdev, true);
+       if (ret)
+               return ret;
 
-               if (result != PPSMC_Result_OK)
-                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
-       }
+       return ret;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
+       ret = ci_set_temperature_range(rdev);
+       if (ret)
+               return ret;
 
        ci_dpm_powergate_uvd(rdev, true);
 
@@ -4721,6 +5257,8 @@ void ci_dpm_disable(struct radeon_device *rdev)
        if (!ci_is_smc_running(rdev))
                return;
 
+       ci_thermal_stop_thermal_controller(rdev);
+
        if (pi->thermal_protection)
                ci_enable_thermal_protection(rdev, false);
        ci_enable_power_containment(rdev, false);
@@ -4729,12 +5267,13 @@ void ci_dpm_disable(struct radeon_device *rdev)
        ci_enable_spread_spectrum(rdev, false);
        ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
        ci_stop_dpm(rdev);
-       ci_enable_ds_master_switch(rdev, true);
+       ci_enable_ds_master_switch(rdev, false);
        ci_enable_ulv(rdev, false);
        ci_clear_vc(rdev);
        ci_reset_to_default(rdev);
        ci_dpm_stop_smc(rdev);
        ci_force_switch_to_arb_f0(rdev);
+       ci_enable_thermal_based_sclk_dpm(rdev, false);
 
        ci_update_current_ps(rdev, boot_ps);
 }
@@ -4804,11 +5343,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
        return 0;
 }
 
-int ci_dpm_power_control_set_level(struct radeon_device *rdev)
-{
-       return ci_power_control_set_level(rdev);
-}
-
 void ci_dpm_reset_asic(struct radeon_device *rdev)
 {
        ci_set_boot_state(rdev);
@@ -5068,6 +5602,8 @@ void ci_dpm_fini(struct radeon_device *rdev)
 int ci_dpm_init(struct radeon_device *rdev)
 {
        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+       SMU7_Discrete_DpmTable  *dpm_table;
+       struct radeon_gpio_rec gpio;
        u16 data_offset, size;
        u8 frev, crev;
        struct ci_power_info *pi;
@@ -5137,6 +5673,7 @@ int ci_dpm_init(struct radeon_device *rdev)
        pi->sclk_dpm_key_disabled = 0;
        pi->mclk_dpm_key_disabled = 0;
        pi->pcie_dpm_key_disabled = 0;
+       pi->thermal_sclk_dpm_enabled = 0;
 
        /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
        if ((rdev->pdev->device == 0x6658) &&
@@ -5201,6 +5738,55 @@ int ci_dpm_init(struct radeon_device *rdev)
 
        pi->uvd_enabled = false;
 
+       dpm_table = &pi->smc_state_table;
+
+       gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
+       if (gpio.valid) {
+               dpm_table->VRHotGpio = gpio.shift;
+               rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+       } else {
+               dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+               rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+       }
+
+       gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
+       if (gpio.valid) {
+               dpm_table->AcDcGpio = gpio.shift;
+               rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+       } else {
+               dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+               rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+       }
+
+       gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
+       if (gpio.valid) {
+               u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
+
+               switch (gpio.shift) {
+               case 0:
+                       tmp &= ~GNB_SLOW_MODE_MASK;
+                       tmp |= GNB_SLOW_MODE(1);
+                       break;
+               case 1:
+                       tmp &= ~GNB_SLOW_MODE_MASK;
+                       tmp |= GNB_SLOW_MODE(2);
+                       break;
+               case 2:
+                       tmp |= GNB_SLOW;
+                       break;
+               case 3:
+                       tmp |= FORCE_NB_PS1;
+                       break;
+               case 4:
+                       tmp |= DPM_ENABLED;
+                       break;
+               default:
+                       DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+                       break;
+               }
+               WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
+       }
+
        pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
@@ -5262,6 +5848,8 @@ int ci_dpm_init(struct radeon_device *rdev)
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
                        rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
+       pi->fan_ctrl_is_in_default_mode = true;
+
        return 0;
 }
 
index 93bbed977ffb4631ef77eb57a5fa937f57b67cb6..84e3d3bcf9f327a6a0f410d951ce841090bf9bb0 100644 (file)
@@ -33,6 +33,8 @@
 
 #define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
 
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
 struct ci_pl {
        u32 mclk;
        u32 sclk;
@@ -237,6 +239,7 @@ struct ci_power_info {
        u32 sclk_dpm_key_disabled;
        u32 mclk_dpm_key_disabled;
        u32 pcie_dpm_key_disabled;
+       u32 thermal_sclk_dpm_enabled;
        struct ci_pcie_perf_range pcie_gen_performance;
        struct ci_pcie_perf_range pcie_lane_performance;
        struct ci_pcie_perf_range pcie_gen_powersaving;
@@ -264,6 +267,7 @@ struct ci_power_info {
        bool caps_automatic_dc_transition;
        bool caps_sclk_throttle_low_notification;
        bool caps_dynamic_ac_timing;
+       bool caps_od_fuzzy_fan_control_support;
        /* flags */
        bool thermal_protection;
        bool pcie_performance_request;
@@ -285,6 +289,10 @@ struct ci_power_info {
        struct ci_ps current_ps;
        struct radeon_ps requested_rps;
        struct ci_ps requested_ps;
+       /* fan control */
+       bool fan_ctrl_is_in_default_mode;
+       u32 t_min;
+       u32 fan_ctrl_default_mode;
 };
 
 #define CISLANDS_VOLTAGE_CONTROL_NONE                   0x0
index b630edc2fd0c5d2b27c78bc8042eece5041076d5..e78bcad7a43e0dbef02ceb4854cef8d76837573b 100644 (file)
@@ -129,7 +129,7 @@ void ci_reset_smc(struct radeon_device *rdev)
 
 int ci_program_jump_on_start(struct radeon_device *rdev)
 {
-       static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+       static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
 
        return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
 }
index 377afa504d2bd045cfdc5f2eed06599610c30920..6dcde3798b45a026f0be30f8e7bffb8b254ace81 100644 (file)
@@ -32,6 +32,7 @@
 #include "cik_blit_shaders.h"
 #include "radeon_ucode.h"
 #include "clearstate_ci.h"
+#include "radeon_kfd.h"
 
 MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
 MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -1563,6 +1564,8 @@ static const u32 godavari_golden_registers[] =
 
 static void cik_init_golden_registers(struct radeon_device *rdev)
 {
+       /* Some of the registers might be dependent on GRBM_GFX_INDEX */
+       mutex_lock(&rdev->grbm_idx_mutex);
        switch (rdev->family) {
        case CHIP_BONAIRE:
                radeon_program_register_sequence(rdev,
@@ -1637,6 +1640,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
        default:
                break;
        }
+       mutex_unlock(&rdev->grbm_idx_mutex);
 }
 
 /**
@@ -1806,7 +1810,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data = NULL;
        const __le32 *new_fw_data = NULL;
-       u32 running, blackout = 0;
+       u32 running, blackout = 0, tmp;
        u32 *io_mc_regs = NULL;
        const __le32 *new_io_mc_regs = NULL;
        int i, regs_size, ucode_size;
@@ -1866,6 +1870,15 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
                                WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
                        }
                }
+
+               tmp = RREG32(MC_SEQ_MISC0);
+               if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
+                       WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
+                       WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
+                       WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
+                       WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
+               }
+
                /* load the MC ucode */
                for (i = 0; i < ucode_size; i++) {
                        if (rdev->new_fw)
@@ -3419,6 +3432,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
        u32 disabled_rbs = 0;
        u32 enabled_rbs = 0;
 
+       mutex_lock(&rdev->grbm_idx_mutex);
        for (i = 0; i < se_num; i++) {
                for (j = 0; j < sh_per_se; j++) {
                        cik_select_se_sh(rdev, i, j);
@@ -3430,6 +3444,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
                }
        }
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 
        mask = 1;
        for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3440,6 +3455,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
 
        rdev->config.cik.backend_enable_mask = enabled_rbs;
 
+       mutex_lock(&rdev->grbm_idx_mutex);
        for (i = 0; i < se_num; i++) {
                cik_select_se_sh(rdev, i, 0xffffffff);
                data = 0;
@@ -3467,6 +3483,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
                WREG32(PA_SC_RASTER_CONFIG, data);
        }
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 }
 
 /**
@@ -3684,6 +3701,12 @@ static void cik_gpu_init(struct radeon_device *rdev)
        /* set HW defaults for 3D engine */
        WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
+       mutex_lock(&rdev->grbm_idx_mutex);
+       /*
+        * Make sure that the following register writes are broadcast
+        * to all of the shader engines.
+        */
+       cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
        WREG32(SX_DEBUG_1, 0x20);
 
        WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3739,6 +3762,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
 
        WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
        WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 
        udelay(50);
 }
@@ -3970,31 +3994,27 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
                                    unsigned num_gpu_pages,
                                    struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.blit_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes, control;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
@@ -4018,12 +4038,12 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
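
The semaphore-to-sync conversion above changes the object lifecycle: as read
from these hunks, radeon_sync_create() initializes an on-stack object and
cannot fail, so the leading allocation error path disappears, and the final
radeon_sync_free() is passed the fence that takes ownership of the collected
waits (or NULL on the error paths). The condensed call order:

        struct radeon_sync sync;

        radeon_sync_create(&sync);                  /* on-stack init, no allocation */
        /* lock the ring; on failure: radeon_sync_free(rdev, &sync, NULL) */
        radeon_sync_resv(rdev, &sync, resv, false); /* gather reservation fences */
        radeon_sync_rings(rdev, &sync, ring->idx);  /* emit waits on the ring */
        /* ... emit the copy and the fence ... */
        radeon_sync_free(rdev, &sync, fence);       /* waits now belong to the fence */
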
@@ -4046,6 +4066,7 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
+       unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
        u32 header, control = INDIRECT_BUFFER_VALID;
 
        if (ib->is_const_ib) {
@@ -4074,8 +4095,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
        }
 
-       control |= ib->length_dw |
-               (ib->vm ? (ib->vm->id << 24) : 0);
+       control |= ib->length_dw | (vm_id << 24);
 
        radeon_ring_write(ring, header);
        radeon_ring_write(ring,
@@ -4313,8 +4333,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
        /* init the CE partitions.  CE only used for gfx on CIK */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
        radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
-       radeon_ring_write(ring, 0xc000);
-       radeon_ring_write(ring, 0xc000);
+       radeon_ring_write(ring, 0x8000);
+       radeon_ring_write(ring, 0x8000);
 
        /* setup clear context state */
        radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -4675,12 +4695,11 @@ static int cik_mec_init(struct radeon_device *rdev)
        /*
         * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
         * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+        * Nonetheless, we assign only 1 pipe because all other pipes will
+        * be handled by KFD.
         */
-       if (rdev->family == CHIP_KAVERI)
-               rdev->mec.num_mec = 2;
-       else
-               rdev->mec.num_mec = 1;
-       rdev->mec.num_pipe = 4;
+       rdev->mec.num_mec = 1;
+       rdev->mec.num_pipe = 1;
        rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
 
        if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4822,28 +4841,24 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 
        /* init the pipes */
        mutex_lock(&rdev->srbm_mutex);
-       for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
-               int me = (i < 4) ? 1 : 2;
-               int pipe = (i < 4) ? i : (i - 4);
 
-               eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+       eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
+
+       cik_srbm_select(rdev, 0, 0, 0, 0);
 
-               cik_srbm_select(rdev, me, pipe, 0, 0);
+       /* write the EOP addr */
+       WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+       WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
 
-               /* write the EOP addr */
-               WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
-               WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+       /* set the VMID assigned */
+       WREG32(CP_HPD_EOP_VMID, 0);
 
-               /* set the VMID assigned */
-               WREG32(CP_HPD_EOP_VMID, 0);
+       /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+       tmp = RREG32(CP_HPD_EOP_CONTROL);
+       tmp &= ~EOP_SIZE_MASK;
+       tmp |= order_base_2(MEC_HPD_SIZE / 8);
+       WREG32(CP_HPD_EOP_CONTROL, tmp);
 
-               /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
-               tmp = RREG32(CP_HPD_EOP_CONTROL);
-               tmp &= ~EOP_SIZE_MASK;
-               tmp |= order_base_2(MEC_HPD_SIZE / 8);
-               WREG32(CP_HPD_EOP_CONTROL, tmp);
-       }
-       cik_srbm_select(rdev, 0, 0, 0, 0);
        mutex_unlock(&rdev->srbm_mutex);
 
        /* init the queues.  Just two for now. */
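
The EOP_SIZE field written above encodes a ring of 2^(EOP_SIZE+1) dwords.
Assuming MEC_HPD_SIZE is 2048 bytes, as defined elsewhere in this driver,
2048 / 8 = 256 and order_base_2(256) = 8, so the encoded ring is 512 dwords,
which is exactly 2048 bytes. A stand-alone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int mec_hpd_size = 2048;                /* bytes, assumed */
        unsigned int eop_size = 0, n = mec_hpd_size / 8; /* order_base_2 of a power of two */

        while (n >>= 1)
                eop_size++;
        printf("EOP_SIZE=%u -> %u dwords (%u bytes)\n",
               eop_size, 1u << (eop_size + 1), 4u << (eop_size + 1));
        return 0;       /* EOP_SIZE=8 -> 512 dwords (2048 bytes) */
}
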
@@ -5897,8 +5912,13 @@ int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
  */
 int cik_vm_init(struct radeon_device *rdev)
 {
-       /* number of VMs */
-       rdev->vm_manager.nvm = 16;
+       /*
+        * number of VMs
+        * VMID 0 is reserved for System
+        * radeon graphics/compute will use VMIDs 1-7
+        * amdkfd will use VMIDs 8-15
+        */
+       rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
        /* base offset of vram pages */
        if (rdev->flags & RADEON_IS_IGP) {
                u64 tmp = RREG32(MC_VM_FB_OFFSET);
@@ -5958,26 +5978,23 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
  * Update the page table base and flush the VM TLB
  * using the CP (CIK).
  */
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                 unsigned vm_id, uint64_t pd_addr)
 {
-       struct radeon_ring *ring = &rdev->ring[ridx];
-       int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
-
-       if (vm == NULL)
-               return;
+       int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
-       if (vm->id < 8) {
+       if (vm_id < 8) {
                radeon_ring_write(ring,
-                                 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+                                 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
        } else {
                radeon_ring_write(ring,
-                                 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+                                 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* update SH_MEM_* regs */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5985,7 +6002,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, VMID(vm->id));
+       radeon_ring_write(ring, VMID(vm_id));
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6006,7 +6023,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, VMID(0));
 
        /* HDP flush */
-       cik_hdp_flush_cp_ring_emit(rdev, ridx);
+       cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6014,7 +6031,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 
        /* compute doesn't have PFP */
        if (usepfp) {
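
Register-offset math for the rewritten flush: VM contexts 0-7 use
VM_CONTEXT0_PAGE_TABLE_BASE_ADDR and contexts 8-15 use
VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, each with a 4-byte stride, and the packet
carries the dword address (reg >> 2) followed by the page-frame number
(pd_addr >> 12). A sketch, where the macros are the driver's and the wrapper
is illustrative:

        static uint32_t pt_base_reg(unsigned int vm_id)
        {
                return (vm_id < 8)
                        ? VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)
                        : VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
        }
        /* e.g. vm_id 9 -> VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + 4 */
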
@@ -6059,6 +6076,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
        u32 i, j, k;
        u32 mask;
 
+       mutex_lock(&rdev->grbm_idx_mutex);
        for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
                for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
                        cik_select_se_sh(rdev, i, j);
@@ -6070,6 +6088,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
                }
        }
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 
        mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
        for (k = 0; k < rdev->usec_timeout; k++) {
@@ -6204,10 +6223,12 @@ static int cik_rlc_resume(struct radeon_device *rdev)
        WREG32(RLC_LB_CNTR_INIT, 0);
        WREG32(RLC_LB_CNTR_MAX, 0x00008000);
 
+       mutex_lock(&rdev->grbm_idx_mutex);
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
        WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
        WREG32(RLC_LB_PARAMS, 0x00600408);
        WREG32(RLC_LB_CNTL, 0x80000004);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 
        WREG32(RLC_MC_CNTL, 0);
        WREG32(RLC_UCODE_CNTL, 0);
@@ -6274,11 +6295,13 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
 
                tmp = cik_halt_rlc(rdev);
 
+               mutex_lock(&rdev->grbm_idx_mutex);
                cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
                WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
                WREG32(RLC_SERDES_WR_CTRL, tmp2);
+               mutex_unlock(&rdev->grbm_idx_mutex);
 
                cik_update_rlc(rdev, tmp);
 
@@ -6314,17 +6337,20 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
                }
 
                orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+               data |= 0x00000001;
                data &= 0xfffffffd;
                if (orig != data)
                        WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
 
                tmp = cik_halt_rlc(rdev);
 
+               mutex_lock(&rdev->grbm_idx_mutex);
                cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
                WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
                WREG32(RLC_SERDES_WR_CTRL, data);
+               mutex_unlock(&rdev->grbm_idx_mutex);
 
                cik_update_rlc(rdev, tmp);
 
@@ -6345,7 +6371,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
                }
        } else {
                orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
-               data |= 0x00000002;
+               data |= 0x00000003;
                if (orig != data)
                        WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
 
@@ -6368,11 +6394,13 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
 
                tmp = cik_halt_rlc(rdev);
 
+               mutex_lock(&rdev->grbm_idx_mutex);
                cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
                WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
                WREG32(RLC_SERDES_WR_CTRL, data);
+               mutex_unlock(&rdev->grbm_idx_mutex);
 
                cik_update_rlc(rdev, tmp);
        }
@@ -6801,10 +6829,12 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
        u32 mask = 0, tmp, tmp1;
        int i;
 
+       mutex_lock(&rdev->grbm_idx_mutex);
        cik_select_se_sh(rdev, se, sh);
        tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
        tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&rdev->grbm_idx_mutex);
 
        tmp &= 0xffff0000;
 
@@ -7288,8 +7318,7 @@ static int cik_irq_init(struct radeon_device *rdev)
 int cik_irq_set(struct radeon_device *rdev)
 {
        u32 cp_int_cntl;
-       u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
-       u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
+       u32 cp_m1p0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
@@ -7323,13 +7352,6 @@ int cik_irq_set(struct radeon_device *rdev)
        dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 
        cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
-       cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
 
        if (rdev->flags & RADEON_IS_IGP)
                thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
@@ -7351,33 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
                        case 0:
                                cp_m1p0 |= TIME_STAMP_INT_ENABLE;
                                break;
-                       case 1:
-                               cp_m1p1 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 2:
-                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 3:
-                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       default:
-                               DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
-                               break;
-                       }
-               } else if (ring->me == 2) {
-                       switch (ring->pipe) {
-                       case 0:
-                               cp_m2p0 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 1:
-                               cp_m2p1 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 2:
-                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 3:
-                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
                        default:
                                DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
                                break;
@@ -7394,33 +7389,6 @@ int cik_irq_set(struct radeon_device *rdev)
                        case 0:
                                cp_m1p0 |= TIME_STAMP_INT_ENABLE;
                                break;
-                       case 1:
-                               cp_m1p1 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 2:
-                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 3:
-                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       default:
-                               DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
-                               break;
-                       }
-               } else if (ring->me == 2) {
-                       switch (ring->pipe) {
-                       case 0:
-                               cp_m2p0 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 1:
-                               cp_m2p1 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 2:
-                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
-                       case 3:
-                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
-                               break;
                        default:
                                DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
                                break;
@@ -7509,13 +7477,6 @@ int cik_irq_set(struct radeon_device *rdev)
        WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
 
        WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
-       WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
-       WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
-       WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
-       WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
-       WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
-       WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
-       WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
 
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
@@ -7832,6 +7793,10 @@ restart_ih:
        while (rptr != wptr) {
                /* wptr/rptr are in bytes! */
                ring_index = rptr / 4;
+
+               radeon_kfd_interrupt(rdev,
+                               (const void *) &rdev->ih.ring[ring_index]);
+
                src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
                src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
                ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
@@ -8521,6 +8486,10 @@ static int cik_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_kfd_resume(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -8569,6 +8538,7 @@ int cik_resume(struct radeon_device *rdev)
  */
 int cik_suspend(struct radeon_device *rdev)
 {
+       radeon_kfd_suspend(rdev);
        radeon_pm_suspend(rdev);
        dce6_audio_fini(rdev);
        radeon_vm_manager_fini(rdev);
@@ -9447,6 +9417,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
index ca1bb6133580aaa30579945e32810dba8317ca8e..79c45e8a536b03405d061dec6e41eb0f3eea6ca7 100644 (file)
 
 #define CIK_LB_DESKTOP_HEIGHT                     0x6b0c
 
+#define CP_HQD_IQ_RPTR                                 0xC970u
+#define AQL_ENABLE                                     (1U << 0)
+
+#define IDLE                                   (1 << 2)
+
+struct cik_mqd {
+       uint32_t header;
+       uint32_t compute_dispatch_initiator;
+       uint32_t compute_dim_x;
+       uint32_t compute_dim_y;
+       uint32_t compute_dim_z;
+       uint32_t compute_start_x;
+       uint32_t compute_start_y;
+       uint32_t compute_start_z;
+       uint32_t compute_num_thread_x;
+       uint32_t compute_num_thread_y;
+       uint32_t compute_num_thread_z;
+       uint32_t compute_pipelinestat_enable;
+       uint32_t compute_perfcount_enable;
+       uint32_t compute_pgm_lo;
+       uint32_t compute_pgm_hi;
+       uint32_t compute_tba_lo;
+       uint32_t compute_tba_hi;
+       uint32_t compute_tma_lo;
+       uint32_t compute_tma_hi;
+       uint32_t compute_pgm_rsrc1;
+       uint32_t compute_pgm_rsrc2;
+       uint32_t compute_vmid;
+       uint32_t compute_resource_limits;
+       uint32_t compute_static_thread_mgmt_se0;
+       uint32_t compute_static_thread_mgmt_se1;
+       uint32_t compute_tmpring_size;
+       uint32_t compute_static_thread_mgmt_se2;
+       uint32_t compute_static_thread_mgmt_se3;
+       uint32_t compute_restart_x;
+       uint32_t compute_restart_y;
+       uint32_t compute_restart_z;
+       uint32_t compute_thread_trace_enable;
+       uint32_t compute_misc_reserved;
+       uint32_t compute_user_data_0;
+       uint32_t compute_user_data_1;
+       uint32_t compute_user_data_2;
+       uint32_t compute_user_data_3;
+       uint32_t compute_user_data_4;
+       uint32_t compute_user_data_5;
+       uint32_t compute_user_data_6;
+       uint32_t compute_user_data_7;
+       uint32_t compute_user_data_8;
+       uint32_t compute_user_data_9;
+       uint32_t compute_user_data_10;
+       uint32_t compute_user_data_11;
+       uint32_t compute_user_data_12;
+       uint32_t compute_user_data_13;
+       uint32_t compute_user_data_14;
+       uint32_t compute_user_data_15;
+       uint32_t cp_compute_csinvoc_count_lo;
+       uint32_t cp_compute_csinvoc_count_hi;
+       uint32_t cp_mqd_base_addr_lo;
+       uint32_t cp_mqd_base_addr_hi;
+       uint32_t cp_hqd_active;
+       uint32_t cp_hqd_vmid;
+       uint32_t cp_hqd_persistent_state;
+       uint32_t cp_hqd_pipe_priority;
+       uint32_t cp_hqd_queue_priority;
+       uint32_t cp_hqd_quantum;
+       uint32_t cp_hqd_pq_base_lo;
+       uint32_t cp_hqd_pq_base_hi;
+       uint32_t cp_hqd_pq_rptr;
+       uint32_t cp_hqd_pq_rptr_report_addr_lo;
+       uint32_t cp_hqd_pq_rptr_report_addr_hi;
+       uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+       uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+       uint32_t cp_hqd_pq_doorbell_control;
+       uint32_t cp_hqd_pq_wptr;
+       uint32_t cp_hqd_pq_control;
+       uint32_t cp_hqd_ib_base_addr_lo;
+       uint32_t cp_hqd_ib_base_addr_hi;
+       uint32_t cp_hqd_ib_rptr;
+       uint32_t cp_hqd_ib_control;
+       uint32_t cp_hqd_iq_timer;
+       uint32_t cp_hqd_iq_rptr;
+       uint32_t cp_hqd_dequeue_request;
+       uint32_t cp_hqd_dma_offload;
+       uint32_t cp_hqd_sema_cmd;
+       uint32_t cp_hqd_msg_type;
+       uint32_t cp_hqd_atomic0_preop_lo;
+       uint32_t cp_hqd_atomic0_preop_hi;
+       uint32_t cp_hqd_atomic1_preop_lo;
+       uint32_t cp_hqd_atomic1_preop_hi;
+       uint32_t cp_hqd_hq_status0;
+       uint32_t cp_hqd_hq_control0;
+       uint32_t cp_mqd_control;
+       uint32_t cp_mqd_query_time_lo;
+       uint32_t cp_mqd_query_time_hi;
+       uint32_t cp_mqd_connect_start_time_lo;
+       uint32_t cp_mqd_connect_start_time_hi;
+       uint32_t cp_mqd_connect_end_time_lo;
+       uint32_t cp_mqd_connect_end_time_hi;
+       uint32_t cp_mqd_connect_end_wf_count;
+       uint32_t cp_mqd_connect_end_pq_rptr;
+       uint32_t cp_mqd_connect_end_pq_wptr;
+       uint32_t cp_mqd_connect_end_ib_rptr;
+       uint32_t reserved_96;
+       uint32_t reserved_97;
+       uint32_t reserved_98;
+       uint32_t reserved_99;
+       uint32_t iqtimer_pkt_header;
+       uint32_t iqtimer_pkt_dw0;
+       uint32_t iqtimer_pkt_dw1;
+       uint32_t iqtimer_pkt_dw2;
+       uint32_t iqtimer_pkt_dw3;
+       uint32_t iqtimer_pkt_dw4;
+       uint32_t iqtimer_pkt_dw5;
+       uint32_t iqtimer_pkt_dw6;
+       uint32_t reserved_108;
+       uint32_t reserved_109;
+       uint32_t reserved_110;
+       uint32_t reserved_111;
+       uint32_t queue_doorbell_id0;
+       uint32_t queue_doorbell_id1;
+       uint32_t queue_doorbell_id2;
+       uint32_t queue_doorbell_id3;
+       uint32_t queue_doorbell_id4;
+       uint32_t queue_doorbell_id5;
+       uint32_t queue_doorbell_id6;
+       uint32_t queue_doorbell_id7;
+       uint32_t queue_doorbell_id8;
+       uint32_t queue_doorbell_id9;
+       uint32_t queue_doorbell_id10;
+       uint32_t queue_doorbell_id11;
+       uint32_t queue_doorbell_id12;
+       uint32_t queue_doorbell_id13;
+       uint32_t queue_doorbell_id14;
+       uint32_t queue_doorbell_id15;
+};
+
 #endif
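
The new struct cik_mqd above mirrors the CP's memory queue descriptor dword-for-dword: the reserved_96 and reserved_108 field names index dwords 96 and 108 of the descriptor, which puts the whole structure at exactly 128 dwords (512 bytes). A compile-time check along these lines (a sketch, not part of the patch) would catch any drift between the struct and the hardware layout:

	#include <stdint.h>

	/* Sketch, not part of the patch: the reserved_NN field names imply a
	 * fixed 128-dword (512-byte) hardware layout; assert it at build time.
	 * All members are uint32_t, so no padding can creep in. */
	_Static_assert(sizeof(struct cik_mqd) == 128 * sizeof(uint32_t),
		       "cik_mqd must match the 128-dword hardware MQD layout");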
index 4e8432d07f15a84893cc6cff666992135abdb7ae..dde5c7e29eb200b6dc78f1fad46197e43e0013ed 100644 (file)
@@ -134,7 +134,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
                              struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
-       u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+       u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
 
        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 5;
@@ -541,31 +541,27 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
                                  unsigned num_gpu_pages,
                                  struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
@@ -586,12 +582,12 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
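
This hunk is the first of several that convert the copy functions from a heap-allocated radeon_semaphore to a stack-allocated radeon_sync; the same conversion is applied to evergreen_copy_dma, r600_copy_cpdma and r600_copy_dma further down. The practical win shows in the error paths: radeon_sync_create initializes a caller-owned struct and cannot fail, so the up-front allocation-failure branch disappears. The resulting call pattern, extracted as a sketch from the hunk above:

	struct radeon_sync sync;

	radeon_sync_create(&sync);                   /* init on the stack, cannot fail */
	/* ... lock the ring ... */
	radeon_sync_resv(rdev, &sync, resv, false);  /* collect fences from the reservation object */
	radeon_sync_rings(rdev, &sync, ring->idx);   /* emit waits for work on other rings */
	/* ... emit copy packets, emit the new fence ... */
	radeon_sync_free(rdev, &sync, fence);        /* release, keyed to the new fence */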
@@ -667,17 +663,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -686,8 +685,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
@@ -704,7 +703,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
@@ -901,25 +900,21 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
  * Update the page table base and flush the VM TLB
  * using sDMA (CIK).
  */
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                     unsigned vm_id, uint64_t pd_addr)
 {
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
-
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       if (vm->id < 8) {
-               radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+       if (vm_id < 8) {
+               radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
        } else {
-               radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+               radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
        }
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
-       radeon_ring_write(ring, VMID(vm->id));
+       radeon_ring_write(ring, VMID(vm_id));
 
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
@@ -942,11 +937,11 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
        radeon_ring_write(ring, VMID(0));
 
        /* flush HDP */
-       cik_sdma_hdp_flush_ring_emit(rdev, ridx);
+       cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
 
        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 }
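
The reworked flush functions take the VM id and page-directory address directly instead of a radeon_vm pointer, so the NULL-vm guard moves to the callers and the same code path can flush any of the 16 contexts. The id-to-register mapping used above, pulled out as a sketch for clarity (contexts 0-7 and 8-15 live in two separate register banks):

	/* Sketch: compute the page-table base register for a VM context.
	 * Contexts 0-7 use the VM_CONTEXT0_* bank, 8-15 the VM_CONTEXT8_* bank. */
	static u32 vm_pt_base_reg(unsigned vm_id)
	{
		if (vm_id < 8)
			return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2);
		return VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
	}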
 
index 0c6e1b55d9684aa620e6c665c915e3293a92c7ab..ba85986febea5054762615ee7fc5edbaa275f2cd 100644 (file)
@@ -30,6 +30,8 @@
 #define CIK_RB_BITMAP_WIDTH_PER_SH     2
 #define HAWAII_RB_BITMAP_WIDTH_PER_SH  4
 
+#define RADEON_NUM_OF_VMIDS    8
+
 /* DIDT IND registers */
 #define DIDT_SQ_CTRL0                                     0x0
 #       define DIDT_CTRL_EN                               (1 << 0)
 #define                DIG_THERM_DPM(x)                        ((x) << 14)
 #define                DIG_THERM_DPM_MASK                      0x003FC000
 #define                DIG_THERM_DPM_SHIFT                     14
-
+#define        CG_THERMAL_STATUS                               0xC0300008
+#define                FDO_PWM_DUTY(x)                         ((x) << 9)
+#define                FDO_PWM_DUTY_MASK                       (0xff << 9)
+#define                FDO_PWM_DUTY_SHIFT                      9
 #define        CG_THERMAL_INT                                  0xC030000C
 #define                CI_DIG_THERM_INTH(x)                    ((x) << 8)
 #define                CI_DIG_THERM_INTH_MASK                  0x0000FF00
 #define                CI_DIG_THERM_INTL_SHIFT                 16
 #define        THERM_INT_MASK_HIGH                     (1 << 24)
 #define        THERM_INT_MASK_LOW                      (1 << 25)
-
+#define        CG_MULT_THERMAL_CTRL                            0xC0300010
+#define                TEMP_SEL(x)                             ((x) << 20)
+#define                TEMP_SEL_MASK                           (0xff << 20)
+#define                TEMP_SEL_SHIFT                          20
 #define        CG_MULT_THERMAL_STATUS                          0xC0300014
 #define                ASIC_MAX_TEMP(x)                        ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                      0x000001ff
 #define                CTF_TEMP_MASK                           0x0003fe00
 #define                CTF_TEMP_SHIFT                          9
 
+#define        CG_FDO_CTRL0                                    0xC0300064
+#define                FDO_STATIC_DUTY(x)                      ((x) << 0)
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
+#define                FDO_STATIC_DUTY_SHIFT                   0
+#define        CG_FDO_CTRL1                                    0xC0300068
+#define                FMAX_DUTY100(x)                         ((x) << 0)
+#define                FMAX_DUTY100_MASK                       0x000000FF
+#define                FMAX_DUTY100_SHIFT                      0
+#define        CG_FDO_CTRL2                                    0xC030006C
+#define                TMIN(x)                                 ((x) << 0)
+#define                TMIN_MASK                               0x000000FF
+#define                TMIN_SHIFT                              0
+#define                FDO_PWM_MODE(x)                         ((x) << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
+#define                FDO_PWM_MODE_SHIFT                      11
+#define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
+#define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
+#define                TACH_PWM_RESP_RATE_SHIFT                25
+#define CG_TACH_CTRL                                    0xC0300070
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0xC0300074
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
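
The CG_FDO_* and CG_TACH_* additions expose the fan controller's duty-cycle and tachometer fields for the fan-control work elsewhere in this merge. A read-modify-write through these macros would look roughly like this (a sketch assuming the driver's usual RREG32/WREG32 register accessors; FDO_PWM_MODE_STATIC is defined in r600_dpm.h below):

	u32 tmp = RREG32(CG_FDO_CTRL2);

	tmp &= ~FDO_PWM_MODE_MASK;                 /* clear the mode field (bits 13:11) */
	tmp |= FDO_PWM_MODE(FDO_PWM_MODE_STATIC);  /* select static-duty PWM */
	WREG32(CG_FDO_CTRL2, tmp);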
 #define CG_ECLK_CNTL                                    0xC05000AC
 #       define ECLK_DIVIDER_MASK                        0x7f
 #       define ECLK_DIR_CNTL_EN                         (1 << 8)
 #define                        SH_MEM_ALIGNMENT_MODE_UNALIGNED                 3
 #define                DEFAULT_MTYPE(x)                                ((x) << 4)
 #define                APE1_MTYPE(x)                                   ((x) << 7)
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define        MTYPE_CACHED                                    0
+#define        MTYPE_NONCACHED                                 3
 
 #define        SX_DEBUG_1                                      0x9060
 
 #define CP_HQD_ACTIVE                                     0xC91C
 #define CP_HQD_VMID                                       0xC920
 
+#define CP_HQD_PERSISTENT_STATE                                0xC924u
+#define        DEFAULT_CP_HQD_PERSISTENT_STATE                 (0x33U << 8)
+
+#define CP_HQD_PIPE_PRIORITY                           0xC928u
+#define CP_HQD_QUEUE_PRIORITY                          0xC92Cu
+#define CP_HQD_QUANTUM                                 0xC930u
+#define        QUANTUM_EN                                      1U
+#define        QUANTUM_SCALE_1MS                               (1U << 4)
+#define        QUANTUM_DURATION(x)                             ((x) << 8)
+
 #define CP_HQD_PQ_BASE                                    0xC934
 #define CP_HQD_PQ_BASE_HI                                 0xC938
 #define CP_HQD_PQ_RPTR                                    0xC93C
 #define                PRIV_STATE                              (1 << 30)
 #define                KMD_QUEUE                               (1 << 31)
 
-#define CP_HQD_DEQUEUE_REQUEST                          0xC974
+#define CP_HQD_IB_BASE_ADDR                            0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI                 0xC960u
+#define CP_HQD_IB_RPTR                                 0xC964u
+#define CP_HQD_IB_CONTROL                              0xC968u
+#define        IB_ATC_EN                                       (1U << 23)
+#define        DEFAULT_MIN_IB_AVAIL_SIZE                       (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST                 0xC974
+#define        DEQUEUE_REQUEST_DRAIN                           1
+#define DEQUEUE_REQUEST_RESET                          2
 
 #define CP_MQD_CONTROL                                  0xC99C
 #define                MQD_VMID(x)                             ((x) << 0)
 #define                MQD_VMID_MASK                           (0xf << 0)
 
+#define CP_HQD_SEMA_CMD                                        0xC97Cu
+#define CP_HQD_MSG_TYPE                                        0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO                        0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI                        0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO                        0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI                        0xC990u
+#define CP_HQD_HQ_SCHEDULER0                   0xC994u
+#define CP_HQD_HQ_SCHEDULER1                   0xC998u
+
+#define SH_STATIC_MEM_CONFIG                   0x9604u
+
 #define DB_RENDER_CONTROL                               0x28000
 
 #define PA_SC_RASTER_CONFIG                             0x28350
 #define VCE_CMD_IB_AUTO                0x00000005
 #define VCE_CMD_SEMAPHORE      0x00000006
 
+#define ATC_VMID0_PASID_MAPPING                                        0x339Cu
+#define        ATC_VMID_PASID_MAPPING_UPDATE_STATUS    0x3398u
+#define        ATC_VMID_PASID_MAPPING_VALID                            (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL                                  0x3310u
+#define        ATS_ACCESS_MODE_NEVER                                           0
+#define        ATS_ACCESS_MODE_ALWAYS                                          1
+
+#define ATC_VM_APERTURE0_CNTL2                                 0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR                             0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR                              0x3300u
+#define ATC_VM_APERTURE1_CNTL                                  0x3314u
+#define ATC_VM_APERTURE1_CNTL2                                 0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR                             0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR                              0x3304u
+
 #endif
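
The ATC_VMID*_PASID_MAPPING registers are consumed by the amdkfd glue added elsewhere in this merge; pairing a process address space id with a VMID presumably amounts to writing the PASID with the VALID bit set, along the lines of this sketch (the helper name and the one-register-per-VMID stride are assumptions):

	/* Hypothetical helper: bind a PASID to a VMID via the per-VMID
	 * mapping registers, assumed to be one 32-bit register per VMID,
	 * 4 bytes apart starting at ATC_VMID0_PASID_MAPPING. */
	static void atc_map_pasid(struct radeon_device *rdev,
				  unsigned vmid, u32 pasid)
	{
		WREG32(ATC_VMID0_PASID_MAPPING + vmid * 4,
		       ATC_VMID_PASID_MAPPING_VALID | pasid);
	}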
index f37d39d2bbbcf67f1e22dff4202aba0aa4320ef0..85995b4e33387586448629c44ae31bbff103f10e 100644 (file)
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
index 5c8b358f9fbad903fb8615fd7806235fec0e5bf4..924b1b7ab455537755ebf0209ae5d7e2aa9a9e0e 100644 (file)
@@ -35,7 +35,7 @@
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-                          struct radeon_cs_reloc **cs_reloc);
+                          struct radeon_bo_list **cs_reloc);
 struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 last_reg;
        u32 m, i, tmp, *ib;
        int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct evergreen_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                        p->track = NULL;
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib.length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
  **/
 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-       struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+       struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
        u32 header, cmd, count, sub_cmd;
        volatile u32 *ib = p->ib.ptr;
        u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib->length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
index 66bcfadeedd1a56265ea2f8121df864ecad4574e..96535aa8659c3ffe3df3ee2b208d6136aa4c4c35 100644 (file)
@@ -110,31 +110,27 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
                                        unsigned num_gpu_pages,
                                        struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
@@ -153,12 +149,12 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
index 3faee58946dd0027b67d5d835af50f8de899ec93..360de9f1f4914079d3de3ad0022a50862b934395 100644 (file)
@@ -1373,6 +1373,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
+       unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
        u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
                PACKET3_SH_ACTION_ENA;
 
@@ -1395,15 +1396,14 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(ring, ib->length_dw | 
-                         (ib->vm ? (ib->vm->id << 24) : 0));
+       radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
 
        /* flush read cache over gart for this vmid */
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
+       radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
 }
 
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -2502,15 +2502,11 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
  * Update the page table base and flush the VM TLB
  * using the CP (cayman-si).
  */
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                    unsigned vm_id, uint64_t pd_addr)
 {
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
-
-       radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* flush hdp cache */
        radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2518,7 +2514,7 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
index f26f0a9fb522ae4671740a7decd4ec18455656b0..50f88611ff60c832dc59e767543f91f8baf859eb 100644 (file)
@@ -123,6 +123,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
+       unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
 
        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
@@ -140,7 +141,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-       radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+       radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
@@ -446,16 +447,12 @@ void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
 
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                        unsigned vm_id, uint64_t pd_addr)
 {
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
-
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
-       radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
@@ -465,6 +462,6 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm
        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 }
 
index 5670b8291285d6827e87abd7a66b447ee774d315..7e5724a12f8b0d067445df96c15f14ba47cdc152 100644 (file)
 #define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
 #define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   0x40
 
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+       FAN_CONTROL_FUZZY,
+       FAN_CONTROL_TABLE
+};
+
 #define PPSMC_Result_OK             ((uint8_t)0x01)
 #define PPSMC_Result_Failed         ((uint8_t)0xFF)
 
@@ -79,6 +87,8 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_DisableCac                ((uint8_t)0x54)
 #define PPSMC_TDPClampingActive             ((uint8_t)0x59)
 #define PPSMC_TDPClampingInactive           ((uint8_t)0x5A)
+#define PPSMC_StartFanControl               ((uint8_t)0x5B)
+#define PPSMC_StopFanControl                ((uint8_t)0x5C)
 #define PPSMC_MSG_NoDisplay                 ((uint8_t)0x5D)
 #define PPSMC_MSG_HasDisplay                ((uint8_t)0x5E)
 #define PPSMC_MSG_UVDPowerOFF               ((uint8_t)0x60)
@@ -106,6 +116,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_SAMUDPM_SetEnabledMask      ((uint16_t) 0x130)
 #define PPSMC_MSG_MCLKDPM_ForceState          ((uint16_t) 0x131)
 #define PPSMC_MSG_MCLKDPM_NoForcedLevel       ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable        ((uint16_t) 0x133)
 #define PPSMC_MSG_Voltage_Cntl_Disable        ((uint16_t) 0x135)
 #define PPSMC_MSG_PCIeDPM_Enable              ((uint16_t) 0x136)
 #define PPSMC_MSG_PCIeDPM_Disable             ((uint16_t) 0x13d)
@@ -149,6 +160,10 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_MASTER_DeepSleep_ON         ((uint16_t) 0x18F)
 #define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
 #define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
+#define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+
+#define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
 
 #define PPSMC_MSG_API_GetSclkFrequency        ((uint16_t) 0x200)
 #define PPSMC_MSG_API_GetMclkFrequency        ((uint16_t) 0x201)
@@ -157,10 +172,11 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_DPM_Config                ((uint32_t) 0x102)
 #define PPSMC_MSG_DPM_ForceState            ((uint32_t) 0x104)
 #define PPSMC_MSG_PG_SIMD_Config            ((uint32_t) 0x108)
-#define PPSMC_MSG_DPM_N_LevelsDisabled      ((uint32_t) 0x112)
+#define PPSMC_MSG_Thermal_Cntl_Enable       ((uint32_t) 0x10a)
 #define PPSMC_MSG_Voltage_Cntl_Enable       ((uint32_t) 0x109)
 #define PPSMC_MSG_VCEPowerOFF               ((uint32_t) 0x10e)
 #define PPSMC_MSG_VCEPowerON                ((uint32_t) 0x10f)
+#define PPSMC_MSG_DPM_N_LevelsDisabled      ((uint32_t) 0x112)
 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment   ((uint32_t) 0x11d)
 #define PPSMC_MSG_DCE_AllowVoltageAdjustment    ((uint32_t) 0x11e)
 #define PPSMC_MSG_EnableBAPM                ((uint32_t) 0x120)
index 2d532996c69795cc129cb4ac6c60a4dfc4b70808..4c2eec49dadc93427bd54336daf4ee0d410fdcc4 100644 (file)
@@ -96,6 +96,14 @@ typedef struct _ATOM_PPLIB_FANTABLE2
     USHORT  usTMax;                          // The max temperature
 } ATOM_PPLIB_FANTABLE2;
 
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+       ATOM_PPLIB_FANTABLE2 basicTable2;
+       UCHAR ucFanControlMode;
+       USHORT usFanPWMMax;
+       USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
 typedef struct _ATOM_PPLIB_EXTENDEDHEADER
 {
     USHORT  usSize;
index 10f8be0ee1736394acaf9bac24eb516707917fd2..74f06d5405913a7e78c18af9d365c6549b591f45 100644 (file)
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
        int r;
        u32 tile_flags = 0;
        u32 tmp;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 value;
 
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                             int idx)
 {
        unsigned c, i;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        int r = 0;
        volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        unsigned idx;
        volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
                }
                if (r)
                        return r;
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
        return 0;
 }
 
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        uint32_t pixel_bytes1 = 0;
        uint32_t pixel_bytes2 = 0;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled) {
index 732d4938aab75f91d1666960de2c0275de38d4a2..c70e6d5bcd198ce139f4eae2c842044da998b0e3 100644 (file)
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                       struct radeon_cs_packet *pkt,
                       unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
index 1bc4704034ce9d90f9563f598bb6135b44878f06..064ad5569ccaac826612aedd8d035db3996106db 100644 (file)
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                struct radeon_cs_packet *pkt,
                unsigned idx, unsigned reg)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
 static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
                if (r) {
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
        return 0;
 }
 
index 56b02927cd3de5901da653bbea13cfcc7e9858ab..ef5d6066fa5bbb95d40e8ff4ab99d8953d0f61b1 100644 (file)
@@ -2889,31 +2889,27 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
                                     unsigned num_gpu_pages,
                                     struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.blit_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes, tmp;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2942,12 +2938,12 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
index c47537a1ddbad19cf49ad9cdae7cf6ef8438142d..acc1f99c84d993cd2b25b7379df36d55c7afa2e1 100644 (file)
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct r600_cs_track *track = (struct r600_cs_track *)p->track;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        u32 m, i, tmp, *ib;
        int r;
 
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int r600_packet3_check(struct radeon_cs_parser *p,
                                struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        struct r600_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
                        p->track = NULL;
                        return r;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib.length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 
 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
 {
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                return 0;
        }
-       p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+       p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
        /* Copy the packet into the IB, the parser will read from the
         * input memory (cached) and write to the IB (which can be
         * uncached). */
-       ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+       ib_chunk = parser.chunk_ib;
        parser.ib.length_dw = ib_chunk->length_dw;
        *l = parser.ib.length_dw;
        if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
  * GPU offset using the provided start.
  **/
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-                          struct radeon_cs_reloc **cs_reloc)
+                          struct radeon_bo_list **cs_reloc)
 {
        struct radeon_cs_chunk *relocs_chunk;
        unsigned idx;
 
        *cs_reloc = NULL;
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        idx = p->dma_reloc_idx;
        if (idx >= p->nrelocs) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, p->nrelocs);
                return -EINVAL;
        }
-       *cs_reloc = p->relocs_ptr[idx];
+       *cs_reloc = &p->relocs[idx];
        p->dma_reloc_idx++;
        return 0;
 }
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
  **/
 int r600_dma_cs_parse(struct radeon_cs_parser *p)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-       struct radeon_cs_reloc *src_reloc, *dst_reloc;
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+       struct radeon_bo_list *src_reloc, *dst_reloc;
        u32 header, cmd, count, tiled;
        volatile u32 *ib = p->ib.ptr;
        u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 #if 0
        for (r = 0; r < p->ib->length_dw; r++) {
                printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
index aabc343b9a8faa10728b5dd73444048a243b82e7..d2dd29ab24fa9192d2207c5790035ad0059b0ddd 100644 (file)
@@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
 
-       tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
 
@@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
@@ -441,31 +441,27 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
                                   unsigned num_gpu_pages,
                                   struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
        r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
@@ -484,12 +480,12 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
index f6309bd23e0156461f625aa30e8354dbf3a129dd..843b65f46ece168a8694a8a86a974befda12290b 100644 (file)
@@ -811,6 +811,7 @@ union power_info {
 union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
+       struct _ATOM_PPLIB_FANTABLE3 fan3;
 };
 
 static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
@@ -900,6 +901,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
                        else
                                rdev->pm.dpm.fan.t_max = 10900;
                        rdev->pm.dpm.fan.cycle_delay = 100000;
+                       if (fan_info->fan.ucFanTableFormat >= 3) {
+                               rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+                               rdev->pm.dpm.fan.default_max_fan_pwm =
+                                       le16_to_cpu(fan_info->fan3.usFanPWMMax);
+                               rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+                               rdev->pm.dpm.fan.fan_output_sensitivity =
+                                       le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+                       }
                        rdev->pm.dpm.fan.ucode_fan_control = true;
                }
        }
@@ -1256,7 +1265,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
-                                       ppt->usMaximumPowerDeliveryLimit;
+                                       le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
index 46b9d2a03018d854cb07c72d3eaeee9aa8f4a10d..bd499d749bc980d96b49472d7206665dfb3ab175 100644 (file)
@@ -96,6 +96,9 @@
 #define R600_TEMP_RANGE_MIN (90 * 1000)
 #define R600_TEMP_RANGE_MAX (120 * 1000)
 
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
 enum r600_power_level {
        R600_POWER_LEVEL_LOW = 0,
        R600_POWER_LEVEL_MEDIUM = 1,
index a9717b3fbf1b4bd77417c38c29ac2c577179e947..54529b837afaa1d76147f764664047af5f09b5f6 100644 (file)
@@ -150,9 +150,6 @@ extern int radeon_backlight;
 /* number of hw syncs before falling back on blocking */
 #define RADEON_NUM_SYNCS                       4
 
-/* number of hw syncs before falling back on blocking */
-#define RADEON_NUM_SYNCS                       4
-
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET                    (1 << 20)
 #define RADEON_VA_RESERVED_SIZE                        (8 << 20)
@@ -363,14 +360,15 @@ struct radeon_fence_driver {
 };
 
 struct radeon_fence {
-       struct fence base;
+       struct fence            base;
 
-       struct radeon_device            *rdev;
-       uint64_t                        seq;
+       struct radeon_device    *rdev;
+       uint64_t                seq;
        /* RB, DMA, etc. */
-       unsigned                        ring;
+       unsigned                ring;
+       bool                    is_vm_update;
 
-       wait_queue_t                    fence_wake;
+       wait_queue_t            fence_wake;
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -452,12 +450,22 @@ struct radeon_mman {
 #endif
 };
 
+struct radeon_bo_list {
+       struct radeon_bo                *robj;
+       struct ttm_validate_buffer      tv;
+       uint64_t                        gpu_offset;
+       unsigned                        prefered_domains;
+       unsigned                        allowed_domains;
+       uint32_t                        tiling_flags;
+};
+
 /* bo virtual address in a specific vm */
 struct radeon_bo_va {
        /* protected by bo being reserved */
        struct list_head                bo_list;
        uint32_t                        flags;
        uint64_t                        addr;
+       struct radeon_fence             *last_pt_update;
        unsigned                        ref_count;
 
        /* protected by vm mutex */
@@ -474,7 +482,7 @@ struct radeon_bo {
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             initial_domain;
-       struct ttm_place                placements[3];
+       struct ttm_place                placements[4];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -576,10 +584,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
  * Semaphores.
  */
 struct radeon_semaphore {
-       struct radeon_sa_bo             *sa_bo;
-       signed                          waiters;
-       uint64_t                        gpu_addr;
-       struct radeon_fence             *sync_to[RADEON_NUM_RINGS];
+       struct radeon_sa_bo     *sa_bo;
+       signed                  waiters;
+       uint64_t                gpu_addr;
 };
 
 int radeon_semaphore_create(struct radeon_device *rdev,
@@ -588,19 +595,32 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
                                  struct radeon_semaphore *semaphore);
 bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
                                struct radeon_semaphore *semaphore);
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
-                                struct radeon_fence *fence);
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
-                              struct radeon_semaphore *semaphore,
-                              struct reservation_object *resv,
-                              bool shared);
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
-                               struct radeon_semaphore *semaphore,
-                               int waiting_ring);
 void radeon_semaphore_free(struct radeon_device *rdev,
                           struct radeon_semaphore **semaphore,
                           struct radeon_fence *fence);
 
+/*
+ * Synchronization
+ */
+struct radeon_sync {
+       struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
+       struct radeon_fence     *sync_to[RADEON_NUM_RINGS];
+       struct radeon_fence     *last_vm_update;
+};
+
+void radeon_sync_create(struct radeon_sync *sync);
+void radeon_sync_fence(struct radeon_sync *sync,
+                      struct radeon_fence *fence);
+int radeon_sync_resv(struct radeon_device *rdev,
+                    struct radeon_sync *sync,
+                    struct reservation_object *resv,
+                    bool shared);
+int radeon_sync_rings(struct radeon_device *rdev,
+                     struct radeon_sync *sync,
+                     int waiting_ring);
+void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
+                     struct radeon_fence *fence);
+
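
struct radeon_sync replaces the per-semaphore sync_to[] array deleted from struct radeon_semaphore above: dependency fences are now accumulated per ring on the sync object itself, with up to RADEON_NUM_SYNCS semaphores available for the eventual waits. A minimal userspace model of the per-ring bookkeeping (not the kernel code; all names here are stand-ins):

	#include <stdint.h>
	#include <stddef.h>

	#define NUM_RINGS 8			/* stand-in for RADEON_NUM_RINGS */

	struct model_fence {
		unsigned ring;			/* ring the fence will signal on */
		uint64_t seq;			/* per-ring sequence number */
	};

	struct model_sync {
		struct model_fence *sync_to[NUM_RINGS];
	};

	/* Keep only the latest fence per ring: waiting on it implies all
	 * earlier fences on the same ring, so one wait per ring suffices. */
	static void model_sync_fence(struct model_sync *sync,
				     struct model_fence *fence)
	{
		struct model_fence *other = sync->sync_to[fence->ring];

		if (other == NULL || fence->seq > other->seq)
			sync->sync_to[fence->ring] = fence;
	}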
 /*
  * GART structures, functions & helpers
  */
@@ -701,6 +721,10 @@ struct radeon_doorbell {
 
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+                                 phys_addr_t *aperture_base,
+                                 size_t *aperture_size,
+                                 size_t *start_offset);
 
 /*
  * IRQS.
@@ -814,7 +838,7 @@ struct radeon_ib {
        struct radeon_fence             *fence;
        struct radeon_vm                *vm;
        bool                            is_const_ib;
-       struct radeon_semaphore         *semaphore;
+       struct radeon_sync              sync;
 };
 
 struct radeon_ring {
@@ -891,33 +915,40 @@ struct radeon_vm_pt {
        uint64_t                        addr;
 };
 
+struct radeon_vm_id {
+       unsigned                id;
+       uint64_t                pd_gpu_addr;
+       /* last flushed PD/PT update */
+       struct radeon_fence     *flushed_updates;
+       /* last use of vmid */
+       struct radeon_fence     *last_id_use;
+};
+
 struct radeon_vm {
-       struct rb_root                  va;
-       unsigned                        id;
+       struct mutex            mutex;
+
+       struct rb_root          va;
+
+       /* protecting invalidated and freed */
+       spinlock_t              status_lock;
 
        /* BOs moved, but not yet updated in the PT */
-       struct list_head                invalidated;
+       struct list_head        invalidated;
 
        /* BOs freed, but not yet updated in the PT */
-       struct list_head                freed;
+       struct list_head        freed;
 
        /* contains the page directory */
-       struct radeon_bo                *page_directory;
-       uint64_t                        pd_gpu_addr;
-       unsigned                        max_pde_used;
+       struct radeon_bo        *page_directory;
+       unsigned                max_pde_used;
 
        /* array of page tables, one for each page directory entry */
-       struct radeon_vm_pt             *page_tables;
+       struct radeon_vm_pt     *page_tables;
 
-       struct radeon_bo_va             *ib_bo_va;
+       struct radeon_bo_va     *ib_bo_va;
 
-       struct mutex                    mutex;
-       /* last fence for cs using this vm */
-       struct radeon_fence             *fence;
-       /* last flush or NULL if we still need to flush */
-       struct radeon_fence             *last_flush;
-       /* last use of vmid */
-       struct radeon_fence             *last_id_use;
+       /* for id and flush management per ring */
+       struct radeon_vm_id     ids[RADEON_NUM_RINGS];
 };
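
Splitting the single id/pd_gpu_addr/fence triple into ids[RADEON_NUM_RINGS] lets each ring hold its own VM id and track its own flush state, so two rings can use the same VM concurrently without serializing on one id. The flush decision then becomes a per-ring comparison, roughly as below (a sketch, not the kernel code; the real check presumably also consults the flushed_updates fence):

	/* Sketch: a ring needs a TLB flush when the page-directory address
	 * it last flushed for this VM differs from the VM's current one. */
	static bool vm_id_needs_flush(struct radeon_vm_id *vm_id, uint64_t pd_addr)
	{
		return vm_id->pd_gpu_addr != pd_addr;
	}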
 
 struct radeon_vm_manager {
@@ -1025,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
 /*
  * CS.
  */
-struct radeon_cs_reloc {
-       struct drm_gem_object           *gobj;
-       struct radeon_bo                *robj;
-       struct ttm_validate_buffer      tv;
-       uint64_t                        gpu_offset;
-       unsigned                        prefered_domains;
-       unsigned                        allowed_domains;
-       uint32_t                        tiling_flags;
-       uint32_t                        handle;
-};
-
 struct radeon_cs_chunk {
-       uint32_t                chunk_id;
        uint32_t                length_dw;
        uint32_t                *kdata;
        void __user             *user_ptr;
@@ -1055,16 +1074,15 @@ struct radeon_cs_parser {
        unsigned                idx;
        /* relocations */
        unsigned                nrelocs;
-       struct radeon_cs_reloc  *relocs;
-       struct radeon_cs_reloc  **relocs_ptr;
-       struct radeon_cs_reloc  *vm_bos;
+       struct radeon_bo_list   *relocs;
+       struct radeon_bo_list   *vm_bos;
        struct list_head        validated;
        unsigned                dma_reloc_idx;
        /* indices of various chunks */
-       int                     chunk_ib_idx;
-       int                     chunk_relocs_idx;
-       int                     chunk_flags_idx;
-       int                     chunk_const_ib_idx;
+       struct radeon_cs_chunk  *chunk_ib;
+       struct radeon_cs_chunk  *chunk_relocs;
+       struct radeon_cs_chunk  *chunk_flags;
+       struct radeon_cs_chunk  *chunk_const_ib;
        struct radeon_ib        ib;
        struct radeon_ib        const_ib;
        void                    *track;
@@ -1078,7 +1096,7 @@ struct radeon_cs_parser {
 
 static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
-       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_chunk *ibc = p->chunk_ib;
 
        if (ibc->kdata)
                return ibc->kdata[idx];
@@ -1490,6 +1508,10 @@ struct radeon_dpm_fan {
        u8 t_hyst;
        u32 cycle_delay;
        u16 t_max;
+       u8 control_mode;
+       u16 default_max_fan_pwm;
+       u16 default_fan_output_sensitivity;
+       u16 fan_output_sensitivity;
        bool ucode_fan_control;
 };
 
@@ -1623,6 +1645,11 @@ struct radeon_pm {
        /* internal thermal controller on rv6xx+ */
        enum radeon_int_thermal_type int_thermal_type;
        struct device           *int_hwmon_dev;
+       /* fan control parameters */
+       bool                    no_fan;
+       u8                      fan_pulses_per_revolution;
+       u8                      fan_min_rpm;
+       u8                      fan_max_rpm;
        /* dpm */
        bool                    dpm_enabled;
        struct radeon_dpm       dpm;
@@ -1785,7 +1812,8 @@ struct radeon_asic_ring {
        void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
        bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
                               struct radeon_semaphore *semaphore, bool emit_wait);
-       void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+       void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
+                        unsigned vm_id, uint64_t pd_addr);
 
        /* testing functions */
        int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -2388,6 +2416,8 @@ struct radeon_device {
        struct radeon_atcs              atcs;
        /* srbm instance registers */
        struct mutex                    srbm_mutex;
+       /* GRBM index mutex. Protects concurrent access to GRBM index */
+       struct mutex                    grbm_idx_mutex;
        /* clock, powergating flags */
        u32 cg_flags;
        u32 pg_flags;
@@ -2400,6 +2430,10 @@ struct radeon_device {
        u64 vram_pin_size;
        u64 gart_pin_size;
 
+       /* amdkfd interface */
+       struct kfd_dev          *kfd;
+       struct radeon_sa_manager        kfd_bo;
+
        struct mutex    mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);
 };
@@ -2831,7 +2865,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
 #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
 #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
 #define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
 #define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
 #define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
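
The vm_flush callback and its wrapper macro above now take the ring pointer
plus an explicit (vm_id, pd_addr) pair instead of a ring index and the whole
radeon_vm, so a flush presumably no longer needs to touch the VM structure
itself. Sketch of the call-site change, with hypothetical locals:

    /* old: ring index plus the VM object */
    radeon_ring_vm_flush(rdev, ring_index, vm);

    /* new: ring object, VM id and page-directory address passed directly */
    radeon_ring_vm_flush(rdev, ring, vm_id, pd_addr);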
@@ -2940,14 +2974,14 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct radeon_vm *vm,
                                           struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring);
 void radeon_vm_flush(struct radeon_device *rdev,
                      struct radeon_vm *vm,
-                     int ring);
+                    int ring, struct radeon_fence *fence);
 void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence);
@@ -3054,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
 void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt);
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                               struct radeon_cs_reloc **cs_reloc,
+                               struct radeon_bo_list **cs_reloc,
                                int nomm);
 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
                               uint32_t *vline_start_end,
index d8ace5b28a5b2e1455b5776129022300b3d1be3e..2a45d548d5ece5d9cd1cdad32d64fb3808b0c6fd 100644 (file)
@@ -599,7 +599,8 @@ int cayman_asic_reset(struct radeon_device *rdev);
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                    unsigned vm_id, uint64_t pd_addr);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -624,7 +625,8 @@ void cayman_dma_vm_set_pages(struct radeon_device *rdev,
                             uint32_t incr, uint32_t flags);
 void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                        unsigned vm_id, uint64_t pd_addr);
 
 u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
                        struct radeon_ring *ring);
@@ -699,7 +701,8 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                unsigned vm_id, uint64_t pd_addr);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
                                 uint64_t src_offset, uint64_t dst_offset,
@@ -721,7 +724,8 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
                         uint64_t addr, unsigned count,
                         uint32_t incr, uint32_t flags);
 
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                    unsigned vm_id, uint64_t pd_addr);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
 int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
@@ -793,7 +797,8 @@ int cik_irq_set(struct radeon_device *rdev);
 int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                 unsigned vm_id, uint64_t pd_addr);
 
 void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
                            struct radeon_ib *ib,
@@ -811,7 +816,8 @@ void cik_sdma_vm_set_pages(struct radeon_device *rdev,
                           uint32_t incr, uint32_t flags);
 void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
 
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                     unsigned vm_id, uint64_t pd_addr);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
                     struct radeon_ring *ring);
index df69b92ba164158cc5e7ae79f7b59db65305dd49..dbc94f3002972dfb9319289e133bbc5ca2d985c6 100644 (file)
@@ -196,8 +196,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
        }
 }
 
-static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
-                                                u8 id)
+struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+                                                  u8 id)
 {
        struct atom_context *ctx = rdev->mode_info.atom_context;
        struct radeon_gpio_rec gpio;
@@ -221,6 +221,7 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
                        if (id == pin->ucGPIO_ID) {
                                gpio.id = pin->ucGPIO_ID;
                                gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+                               gpio.shift = pin->ucGpioPinBitShift;
                                gpio.mask = (1 << pin->ucGpioPinBitShift);
                                gpio.valid = true;
                                break;
@@ -801,7 +802,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                                hpd_record =
                                                                        (ATOM_HPD_INT_RECORD *)
                                                                        record;
-                                                               gpio = radeon_lookup_gpio(rdev,
+                                                               gpio = radeon_atombios_lookup_gpio(rdev,
                                                                                          hpd_record->ucHPDIntGPIOID);
                                                                hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
                                                                hpd.plugged_state = hpd_record->ucPlugged_PinState;
@@ -2128,7 +2129,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type =
                                        VOLTAGE_GPIO;
                                rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-                                       radeon_lookup_gpio(rdev,
+                                       radeon_atombios_lookup_gpio(rdev,
                                                           power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
                                if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
                                        rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2164,7 +2165,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type =
                                        VOLTAGE_GPIO;
                                rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-                                       radeon_lookup_gpio(rdev,
+                                       radeon_atombios_lookup_gpio(rdev,
                                                           power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
                                if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
                                        rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2200,7 +2201,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type =
                                        VOLTAGE_GPIO;
                                rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-                                       radeon_lookup_gpio(rdev,
+                                       radeon_atombios_lookup_gpio(rdev,
                                                           power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
                                if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
                                        rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2248,6 +2249,14 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 
        /* add the i2c bus for thermal/fan chip */
        if (controller->ucType > 0) {
+               if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+                       rdev->pm.no_fan = true;
+               rdev->pm.fan_pulses_per_revolution =
+                       controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+               if (rdev->pm.fan_pulses_per_revolution) {
+                       rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
+                       rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+               }
                if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
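
The fan bookkeeping added above unpacks the ATOM controller's
ucFanParameters byte, which carries a NOFAN flag plus a mask holding the
tachometer pulses per revolution. A sketch of that extraction, assuming the
two ATOM_PP_FANPARAMETERS_* constants act as flag and mask respectively:

    bool no_fan = controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN;
    u8 pulses_per_rev = controller->ucFanParameters &
            ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;

    /* the RPM limits are only meaningful when a tachometer is present */
    if (pulses_per_rev) {
            u8 min_rpm = controller->ucFanMinRPM;
            u8 max_rpm = controller->ucFanMaxRPM;
    }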
index 300c4b3d4669426d5085d7e5d2e52e23321988d3..26baa9c05f6c49ac40e2a238a6079fa8e0e40920 100644 (file)
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
        }
 
        if (!radeon_connector->edid) {
+               /* don't fetch the edid from the vbios if ddc fails and runpm is
+                * enabled, so we report disconnected.
+                */
+               if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                       return;
+
                if (rdev->is_atom_bios) {
                        /* some laptops provide a hardcoded edid in rom for LCDs */
                        if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 radeon_lvds_detect(struct drm_connector *connector, bool force)
 {
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = radeon_best_single_encoder(connector);
        enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
                /* check if panel is valid */
                if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
                        ret = connector_status_connected;
-
+               /* don't fetch the edid from the vbios if ddc fails and runpm is
+                * enabled, so we report disconnected.
+                */
+               if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                       ret = connector_status_disconnected;
        }
 
        /* check for edid as well */
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                        /* check if panel is valid */
                        if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
                                ret = connector_status_connected;
+                       /* don't fetch the edid from the vbios if ddc fails and runpm is
+                        * enabled, so we report disconnected.
+                        */
+                       if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                               ret = connector_status_disconnected;
                }
                /* eDP is always DP */
                radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
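
The same guard is added to all three detect paths above (EDID fetch, LVDS
and DP/eDP). A hypothetical helper, not part of the patch, just to state the
policy once:

    /* With runtime PM active on a PX (switchable graphics) system, a
     * powered-down GPU must look disconnected; falling back to a
     * vbios-provided EDID would wrongly report a panel as present. */
    static bool radeon_vbios_edid_allowed(struct radeon_device *rdev)
    {
            return !((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0));
    }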
index a3e7aed7e68075f5418ad83e80217d7f6206d89d..c830863bc98aa0cb55e84aedfbbc76606945ee01 100644 (file)
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
-       unsigned i, j;
-       bool duplicate, need_mmap_lock = false;
+       unsigned i;
+       bool need_mmap_lock = false;
        int r;
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                return 0;
        }
-       chunk = &p->chunks[p->chunk_relocs_idx];
+       chunk = p->chunk_relocs;
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
-       p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-       if (p->relocs_ptr == NULL) {
-               return -ENOMEM;
-       }
-       p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+       p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
+               struct drm_gem_object *gobj;
                unsigned priority;
 
-               duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-               for (j = 0; j < i; j++) {
-                       if (r->handle == p->relocs[j].handle) {
-                               p->relocs_ptr[i] = &p->relocs[j];
-                               duplicate = true;
-                               break;
-                       }
-               }
-               if (duplicate) {
-                       p->relocs[i].handle = 0;
-                       continue;
-               }
-
-               p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-                                                         r->handle);
-               if (p->relocs[i].gobj == NULL) {
+               gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+               if (gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
-               p->relocs_ptr[i] = &p->relocs[i];
-               p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+               p->relocs[i].robj = gem_to_radeon_bo(gobj);
 
                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].tv.shared = !r->write_domain;
-               p->relocs[i].handle = r->handle;
 
                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
@@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-       int i, r = 0;
+       struct radeon_bo_list *reloc;
+       int r;
 
-       for (i = 0; i < p->nrelocs; i++) {
+       list_for_each_entry(reloc, &p->validated, tv.head) {
                struct reservation_object *resv;
 
-               if (!p->relocs[i].robj)
-                       continue;
-
-               resv = p->relocs[i].robj->tbo.resv;
-               r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-                                              p->relocs[i].tv.shared);
-
+               resv = reloc->robj->tbo.resv;
+               r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+                                    reloc->tv.shared);
                if (r)
-                       break;
+                       return r;
        }
-       return r;
+       return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -285,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        INIT_LIST_HEAD(&p->validated);
        p->idx = 0;
        p->ib.sa_bo = NULL;
-       p->ib.semaphore = NULL;
        p->const_ib.sa_bo = NULL;
-       p->const_ib.semaphore = NULL;
-       p->chunk_ib_idx = -1;
-       p->chunk_relocs_idx = -1;
-       p->chunk_flags_idx = -1;
-       p->chunk_const_ib_idx = -1;
+       p->chunk_ib = NULL;
+       p->chunk_relocs = NULL;
+       p->chunk_flags = NULL;
+       p->chunk_const_ib = NULL;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
@@ -318,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
-               p->chunks[i].chunk_id = user_chunk.chunk_id;
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-                       p->chunk_relocs_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+                       p->chunk_relocs = &p->chunks[i];
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-                       p->chunk_ib_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+                       p->chunk_ib = &p->chunks[i];
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-                       p->chunk_const_ib_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+                       p->chunk_const_ib = &p->chunks[i];
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-                       p->chunk_flags_idx = i;
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                       p->chunk_flags = &p->chunks[i];
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
@@ -344,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;
 
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }
@@ -360,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
@@ -401,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
 {
-       struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-       struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+       struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+       struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -443,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
-                       if (parser->relocs[i].gobj)
-                               drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+                       struct radeon_bo *bo = parser->relocs[i].robj;
+                       if (bo == NULL)
+                               continue;
+
+                       drm_gem_object_unreference_unlocked(&bo->gem_base);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
-       kfree(parser->relocs_ptr);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
@@ -464,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
 
        if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -524,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;
 
-               /* ignore duplicates */
-               if (p->relocs_ptr[i] != &p->relocs[i])
-                       continue;
-
                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
@@ -538,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
+
+               radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
        }
 
        return radeon_vm_clear_invalids(rdev, vm);
@@ -550,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
        struct radeon_vm *vm = &fpriv->vm;
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;
@@ -582,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                goto out;
        }
-       radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
 
        if ((rdev->family >= CHIP_TAHITI) &&
-           (parser->chunk_const_ib_idx != -1)) {
+           (parser->chunk_const_ib != NULL)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -612,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
        struct radeon_vm *vm = NULL;
        int r;
 
-       if (parser->chunk_ib_idx == -1)
+       if (parser->chunk_ib == NULL)
                return 0;
 
        if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -620,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
                vm = &fpriv->vm;
 
                if ((rdev->family >= CHIP_TAHITI) &&
-                   (parser->chunk_const_ib_idx != -1)) {
-                       ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+                   (parser->chunk_const_ib != NULL)) {
+                       ib_chunk = parser->chunk_const_ib;
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
@@ -640,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
                                return -EFAULT;
                }
 
-               ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+               ib_chunk = parser->chunk_ib;
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
-       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       ib_chunk = parser->chunk_ib;
 
        r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
                           vm, ib_chunk->length_dw * 4);
@@ -738,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
 {
-       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
 
@@ -832,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
  * GPU offset using the provided start.
  **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                               struct radeon_cs_reloc **cs_reloc,
+                               struct radeon_bo_list **cs_reloc,
                                int nomm)
 {
        struct radeon_cs_chunk *relocs_chunk;
@@ -840,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
        unsigned idx;
        int r;
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
@@ -871,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
-               *cs_reloc = p->relocs_ptr[(idx / 4)];
+               *cs_reloc = &p->relocs[(idx / 4)];
        return 0;
 }
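
Two patterns recur in the hunks above: relocations now live in a plain
radeon_bo_list array with no separate relocs_ptr indirection, and
radeon_cs_sync_rings() walks the validated list through the embedded
ttm_validate_buffer. A sketch of that embedded-node iteration, with a
hypothetical consumer:

    struct radeon_bo_list *reloc;

    /* tv.head is the list_head embedded in each entry's
     * ttm_validate_buffer; list_for_each_entry() recovers the containing
     * radeon_bo_list from it */
    list_for_each_entry(reloc, &p->validated, tv.head)
            use_buffer(reloc->robj);    /* hypothetical consumer */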
index 9630e8d95fb40655df4ceefd18e5015ca47194c6..45e54060ee97eea33cd005b993c6ca704cf80a16 100644 (file)
@@ -117,106 +117,7 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
        }
 }
 
-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_device *rdev = crtc->dev->dev_private;
-
-       if (ASIC_IS_DCE4(rdev)) {
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-                      upper_32_bits(gpu_addr));
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
-       } else if (ASIC_IS_AVIVO(rdev)) {
-               if (rdev->family >= CHIP_RV770) {
-                       if (radeon_crtc->crtc_id)
-                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-                       else
-                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-               }
-               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
-       } else {
-               radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
-               /* offset is from DISP(2)_BASE_ADDRESS */
-               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
-       }
-}
-
-int radeon_crtc_cursor_set(struct drm_crtc *crtc,
-                          struct drm_file *file_priv,
-                          uint32_t handle,
-                          uint32_t width,
-                          uint32_t height)
-{
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_device *rdev = crtc->dev->dev_private;
-       struct drm_gem_object *obj;
-       struct radeon_bo *robj;
-       uint64_t gpu_addr;
-       int ret;
-
-       if (!handle) {
-               /* turn off cursor */
-               radeon_hide_cursor(crtc);
-               obj = NULL;
-               goto unpin;
-       }
-
-       if ((width > radeon_crtc->max_cursor_width) ||
-           (height > radeon_crtc->max_cursor_height)) {
-               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
-               return -EINVAL;
-       }
-
-       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
-       if (!obj) {
-               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
-               return -ENOENT;
-       }
-
-       robj = gem_to_radeon_bo(obj);
-       ret = radeon_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       /* Only 27 bit offset for legacy cursor */
-       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-                                      &gpu_addr);
-       radeon_bo_unreserve(robj);
-       if (ret)
-               goto fail;
-
-       radeon_crtc->cursor_width = width;
-       radeon_crtc->cursor_height = height;
-
-       radeon_lock_cursor(crtc, true);
-       radeon_set_cursor(crtc, obj, gpu_addr);
-       radeon_show_cursor(crtc);
-       radeon_lock_cursor(crtc, false);
-
-unpin:
-       if (radeon_crtc->cursor_bo) {
-               robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
-               ret = radeon_bo_reserve(robj, false);
-               if (likely(ret == 0)) {
-                       radeon_bo_unpin(robj);
-                       radeon_bo_unreserve(robj);
-               }
-               drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
-       }
-
-       radeon_crtc->cursor_bo = obj;
-       return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
-
-       return ret;
-}
-
-int radeon_crtc_cursor_move(struct drm_crtc *crtc,
-                           int x, int y)
+static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
@@ -281,7 +182,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                }
        }
 
-       radeon_lock_cursor(crtc, true);
        if (ASIC_IS_DCE4(rdev)) {
                WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
                WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -308,7 +208,173 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
                                                                      (yorigin * 256)));
        }
+
+       radeon_crtc->cursor_x = x;
+       radeon_crtc->cursor_y = y;
+
+       return 0;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+                           int x, int y)
+{
+       int ret;
+
+       radeon_lock_cursor(crtc, true);
+       ret = radeon_cursor_move_locked(crtc, x, y);
        radeon_lock_cursor(crtc, false);
 
+       return ret;
+}
+
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
+       struct radeon_bo *robj = gem_to_radeon_bo(obj);
+       uint64_t gpu_addr;
+       int ret;
+
+       ret = radeon_bo_reserve(robj, false);
+       if (unlikely(ret != 0))
+               goto fail;
+       /* Only 27 bit offset for legacy cursor */
+       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+                                      &gpu_addr);
+       radeon_bo_unreserve(robj);
+       if (ret)
+               goto fail;
+
+       if (ASIC_IS_DCE4(rdev)) {
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+                      upper_32_bits(gpu_addr));
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      gpu_addr & 0xffffffff);
+       } else if (ASIC_IS_AVIVO(rdev)) {
+               if (rdev->family >= CHIP_RV770) {
+                       if (radeon_crtc->crtc_id)
+                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+                       else
+                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+               }
+               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      gpu_addr & 0xffffffff);
+       } else {
+               radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+               /* offset is from DISP(2)_BASE_ADDRESS */
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+       }
+
        return 0;
+
+fail:
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+                           struct drm_file *file_priv,
+                           uint32_t handle,
+                           uint32_t width,
+                           uint32_t height,
+                           int32_t hot_x,
+                           int32_t hot_y)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               radeon_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > radeon_crtc->max_cursor_width) ||
+           (height > radeon_crtc->max_cursor_height)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+               return -ENOENT;
+       }
+
+       radeon_crtc->cursor_width = width;
+       radeon_crtc->cursor_height = height;
+
+       radeon_lock_cursor(crtc, true);
+
+       if (hot_x != radeon_crtc->cursor_hot_x ||
+           hot_y != radeon_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+               y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+               radeon_cursor_move_locked(crtc, x, y);
+
+               radeon_crtc->cursor_hot_x = hot_x;
+               radeon_crtc->cursor_hot_y = hot_y;
+       }
+
+       ret = radeon_set_cursor(crtc, obj);
+
+       if (ret)
+               DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+                         ret);
+       else
+               radeon_show_cursor(crtc);
+
+       radeon_lock_cursor(crtc, false);
+
+unpin:
+       if (radeon_crtc->cursor_bo) {
+               struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+               ret = radeon_bo_reserve(robj, false);
+               if (likely(ret == 0)) {
+                       radeon_bo_unpin(robj);
+                       radeon_bo_unreserve(robj);
+               }
+               if (radeon_crtc->cursor_bo != obj)
+                       drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+       }
+
+       radeon_crtc->cursor_bo = obj;
+       return 0;
+}
+
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       int ret;
+
+       if (radeon_crtc->cursor_bo) {
+               radeon_lock_cursor(crtc, true);
+
+               radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+                                         radeon_crtc->cursor_y);
+
+               ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+               if (ret)
+                       DRM_ERROR("radeon_set_cursor returned %d, not showing "
+                                 "cursor\n", ret);
+               else
+                       radeon_show_cursor(crtc);
+
+               radeon_lock_cursor(crtc, false);
+       }
 }
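
The hotspot handling in radeon_crtc_cursor_set2() above compensates the
stored cursor position when userspace changes the hotspot, so the point the
hotspot marks stays put on screen. A worked example with small numbers:

    /* cursor image at (100, 50) with hotspot (0, 0): the hotspot sits at
     * screen position (100, 50); userspace now asks for hotspot (8, 8) */
    int x = 100 + 0 - 8;    /* = 92 */
    int y = 50 + 0 - 8;     /* = 42 */
    /* the image shifts to (92, 42), and 92 + 8 = 100, 42 + 8 = 50, so the
     * hotspot still lands on (100, 50) */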
index ea2676954dde7ce6e157aa1d628782c0906bc7c2..0ec65168f331c73bcbfff24ee719cd6f98790602 100644 (file)
@@ -377,6 +377,37 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
                __clear_bit(doorbell, rdev->doorbell.used);
 }
 
+/**
+ * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
+ *                                set up KFD
+ *
+ * @rdev: radeon_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for radeon.
+ *
+ * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
+ * takes doorbells required for its own rings and reports the setup to KFD.
+ * Radeon reserved doorbells are at the start of the doorbell aperture.
+ */
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+                                 phys_addr_t *aperture_base,
+                                 size_t *aperture_size,
+                                 size_t *start_offset)
+{
+       /* The first num_doorbells are used by radeon.
+        * KFD takes whatever's left in the aperture. */
+       if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
+               *aperture_base = rdev->doorbell.base;
+               *aperture_size = rdev->doorbell.size;
+               *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
+       } else {
+               *aperture_base = 0;
+               *aperture_size = 0;
+               *start_offset = 0;
+       }
+}
+
 /*
  * radeon_wb_*()
  * Writeback is the method by which the GPU updates special pages
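
A standalone arithmetic sketch of the aperture split implemented by
radeon_doorbell_get_kfd_info() above, with made-up numbers:

    /* say the aperture is 8 KiB and radeon claims 1024 doorbells */
    size_t size = 8192;
    u32 num_doorbells = 1024;                   /* sizeof(u32) bytes each */

    size_t radeon_bytes = num_doorbells * sizeof(u32);  /* 4096 */
    if (size > radeon_bytes) {
            size_t start_offset = radeon_bytes; /* KFD's share: [4096, 8192) */
            size_t kfd_bytes = size - start_offset;     /* 4096 */
    }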
@@ -952,6 +983,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
        }
 
        mutex_init(&rdev->mode_info.atom_context->mutex);
+       mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        atom_allocate_fb_scratch(rdev->mode_info.atom_context);
        return 0;
@@ -1272,6 +1304,7 @@ int radeon_device_init(struct radeon_device *rdev,
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->gpu_clock_mutex);
        mutex_init(&rdev->srbm_mutex);
+       mutex_init(&rdev->grbm_idx_mutex);
        init_rwsem(&rdev->pm.mclk_lock);
        init_rwsem(&rdev->exclusive_lock);
        init_waitqueue_head(&rdev->irq.vblank_queue);
index 00ead8c2758a972debddf97234e206d211db5d7a..102116902a070f728c434a8ee67215ede0cffb70 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_edid.h>
 
 #include <linux/gcd.h>
@@ -634,7 +635,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
        return ret;
 }
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
-       .cursor_set = radeon_crtc_cursor_set,
+       .cursor_set2 = radeon_crtc_cursor_set2,
        .cursor_move = radeon_crtc_cursor_move,
        .gamma_set = radeon_crtc_gamma_set,
        .set_config = radeon_crtc_set_config,
index dcffa30ee2db3d2b990abc4089967c204984be5d..4f50fb0e3d93448034191821496d299290bbc5a6 100644 (file)
@@ -41,6 +41,8 @@
 #include <drm/drm_gem.h>
 
 #include "drm_crtc_helper.h"
+#include "radeon_kfd.h"
+
 /*
  * KMS wrapper.
  * - 2.0.0 - initial interface
@@ -654,12 +656,15 @@ static int __init radeon_init(void)
 #endif
        }
 
+       radeon_kfd_init();
+
        /* let modprobe override vga console setting */
        return drm_pci_init(driver, pdriver);
 }
 
 static void __exit radeon_exit(void)
 {
+       radeon_kfd_fini();
        drm_pci_exit(driver, pdriver);
        radeon_unregister_atpx_handler();
 }
index 9a19e52cc655bf8b3c697a7116e52e81b8060096..6b670b0bc47bb9dca0238ef35dcff1cc2f686c34 100644 (file)
@@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
                    (rdev->pdev->subsystem_vendor == 0x1734) &&
                    (rdev->pdev->subsystem_device == 0x1107))
                        use_bl = false;
+               /* disable native backlight control on older asics */
+               else if (rdev->family < CHIP_R600)
+                       use_bl = false;
                else
                        use_bl = true;
        }
index 0ea1db83d57390e70874f3182893b277b56c6f94..29b9220ec3998daee56bac0969de169268964c13 100644 (file)
@@ -48,10 +48,40 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+       int ret;
+
+       ret = drm_fb_helper_set_par(info);
+
+       /* XXX: with universal plane support, fbdev will automatically disable
+        * all non-primary planes (including the cursor)
+        */
+       if (ret == 0) {
+               struct drm_fb_helper *fb_helper = info->par;
+               int i;
+
+               for (i = 0; i < fb_helper->crtc_count; i++) {
+                       struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+                       radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+               }
+       }
+
+       return ret;
+}
+
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = drm_fb_helper_set_par,
+       .fb_set_par = radeon_fb_helper_set_par,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
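
Passing a NULL file and handle 0, as radeon_fb_helper_set_par() does above,
takes the "turn off cursor" path in radeon_crtc_cursor_set2(): the cursor is
hidden, any previously pinned cursor BO is unpinned, and cursor_bo ends up
NULL.

    /* hide the cursor on a CRTC taken over by fbdev */
    radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);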
index 995167025282a13ff7fdbbe7a1555f0d228a751b..d13d1b5a859f5b4d6aa69adc18cf85a2375398d1 100644 (file)
@@ -140,6 +140,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
        (*fence)->rdev = rdev;
        (*fence)->seq = seq;
        (*fence)->ring = ring;
+       (*fence)->is_vm_update = false;
        fence_init(&(*fence)->base, &radeon_fence_ops,
                   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
        radeon_fence_ring_emit(rdev, ring, *fence);
index c194497aa586e16c5c17776d5baa5959f3eac7c5..fe48f229043e33720c26bcdd40949d7a8fd41b00 100644 (file)
@@ -394,9 +394,10 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        return r;
 }
 
-int radeon_mode_dumb_mmap(struct drm_file *filp,
-                         struct drm_device *dev,
-                         uint32_t handle, uint64_t *offset_p)
+static int radeon_mode_mmap(struct drm_file *filp,
+                           struct drm_device *dev,
+                           uint32_t handle, bool dumb,
+                           uint64_t *offset_p)
 {
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -405,6 +406,14 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
        if (gobj == NULL) {
                return -ENOENT;
        }
+
+       /*
+        * We don't allow dumb mmaps on objects created using another
+        * interface.
+        */
+       WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
+               "Illegal dumb map of GPU buffer.\n");
+
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
@@ -415,12 +424,20 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
        return 0;
 }
 
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+                         struct drm_device *dev,
+                         uint32_t handle, uint64_t *offset_p)
+{
+       return radeon_mode_mmap(filp, dev, handle, true, offset_p);
+}
+
 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
        struct drm_radeon_gem_mmap *args = data;
 
-       return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+       return radeon_mode_mmap(filp, dev, args->handle, false,
+                               &args->addr_ptr);
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -518,6 +535,68 @@ out:
        return r;
 }
 
+/**
+ * radeon_gem_va_update_vm -update the bo_va in its VM
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+                                   struct radeon_bo_va *bo_va)
+{
+       struct ttm_validate_buffer tv, *entry;
+       struct radeon_bo_list *vm_bos;
+       struct ww_acquire_ctx ticket;
+       struct list_head list;
+       unsigned domain;
+       int r;
+
+       INIT_LIST_HEAD(&list);
+
+       tv.bo = &bo_va->bo->tbo;
+       tv.shared = true;
+       list_add(&tv.head, &list);
+
+       vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
+       if (!vm_bos)
+               return;
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       if (r)
+               goto error_free;
+
+       list_for_each_entry(entry, &list, head) {
+               domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+               /* if anything is swapped out, don't swap it in here;
+                  just abort and wait for the next CS */
+               if (domain == RADEON_GEM_DOMAIN_CPU)
+                       goto error_unreserve;
+       }
+
+       mutex_lock(&bo_va->vm->mutex);
+       r = radeon_vm_clear_freed(rdev, bo_va->vm);
+       if (r)
+               goto error_unlock;
+
+       if (bo_va->it.start)
+               r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+       mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+       ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+       drm_free_large(vm_bos);
+
+       if (r)
+               DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
 int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
@@ -601,6 +680,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
+                       radeon_bo_unreserve(rbo);
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -611,12 +691,13 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
        default:
                break;
        }
+       if (!r)
+               radeon_gem_va_update_vm(rdev, bo_va);
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
 out:
-       radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
 }
@@ -682,6 +763,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
                return -ENOMEM;
 
        r = drm_gem_handle_create(file_priv, gobj, &handle);
+       gobj->dumb = true;
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
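
radeon_gem_va_update_vm() above follows the usual TTM execbuf-util
reserve/backoff bracket. A condensed sketch of that pattern, using the calls
as they appear in the function:

    struct ww_acquire_ctx ticket;
    struct list_head list;

    INIT_LIST_HEAD(&list);
    /* ... add ttm_validate_buffer entries (tv.head) to the list ... */

    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r)
            goto error_free;

    /* ... every BO on the list is now reserved; update the page tables ... */

    ttm_eu_backoff_reservation(&ticket, &list); /* drop the reservations */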
index 3f39fcca4d0744ec666630be4e3672ada57cba08..c39ce1f057037e6ae3ddae1c11be1d07d1572b95 100644 (file)
@@ -64,10 +64,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
                return r;
        }
 
-       r = radeon_semaphore_create(rdev, &ib->semaphore);
-       if (r) {
-               return r;
-       }
+       radeon_sync_create(&ib->sync);
 
        ib->ring = ring;
        ib->fence = NULL;
@@ -96,7 +93,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
  */
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+       radeon_sync_free(rdev, &ib->sync, ib->fence);
        radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
        radeon_fence_unref(&ib->fence);
 }
@@ -145,11 +142,11 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
        if (ib->vm) {
                struct radeon_fence *vm_id_fence;
                vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
-               radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
+               radeon_sync_fence(&ib->sync, vm_id_fence);
        }
 
        /* sync with other rings */
-       r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+       r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
                radeon_ring_unlock_undo(rdev, ring);
@@ -157,11 +154,12 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
        }
 
        if (ib->vm)
-               radeon_vm_flush(rdev, ib->vm, ib->ring);
+               radeon_vm_flush(rdev, ib->vm, ib->ring,
+                               ib->sync.last_vm_update);
 
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
-               radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+               radeon_sync_free(rdev, &const_ib->sync, NULL);
        }
        radeon_ring_ib_execute(rdev, ib->ring, ib);
        r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
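
The semaphore-to-sync migration above replaces the per-IB semaphore object
with an embedded radeon_sync. Its lifecycle, pieced together from the calls
in this hunk and in radeon_cs.c above:

    radeon_sync_create(&ib->sync);              /* init; cannot fail */
    radeon_sync_fence(&ib->sync, fence);        /* collect a fence */
    r = radeon_sync_resv(rdev, &ib->sync, resv, shared);
                                                /* collect from a resv obj */
    r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
                                                /* wait before execution */
    radeon_sync_free(rdev, &ib->sync, ib->fence);       /* release */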
index 7784911d78ef6fc54d6aeea23950f4585d3c74c4..00fc59762e0df3bba0758d1f18e90328e5726635 100644 (file)
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_AGP)
                return false;
 
+       /*
+        * Older chips have a HW limitation: they can only generate 40 bits
+        * of address for "64-bit" MSIs, which breaks on some platforms,
+        * notably IBM POWER servers, so we limit them.
+        */
+       if (rdev->family < CHIP_BONAIRE) {
+               dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
+               rdev->pdev->no_64bit_msi = 1;
+       }
+
        /* force MSI on */
        if (radeon_msi == 1)
                return true;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
new file mode 100644 (file)
index 0000000..065d020
--- /dev/null
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "cik_reg.h"
+#include "radeon_kfd.h"
+
+#define CIK_PIPE_PER_MEC       (4)
+
+struct kgd_mem {
+       struct radeon_sa_bo *sa_bo;
+       uint64_t gpu_addr;
+       void *ptr;
+};
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
+static void fini_sa_manager(struct kgd_dev *kgd);
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+               enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd);
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+               uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+               uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid);
+
+static int kgd_init_memory(struct kgd_dev *kgd);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr);
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+       .init_sa_manager = init_sa_manager,
+       .fini_sa_manager = fini_sa_manager,
+       .allocate_mem = allocate_mem,
+       .free_mem = free_mem,
+       .get_vmem_size = get_vmem_size,
+       .get_gpu_clock_counter = get_gpu_clock_counter,
+       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+       .program_sh_mem_settings = kgd_program_sh_mem_settings,
+       .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+       .init_memory = kgd_init_memory,
+       .init_pipeline = kgd_init_pipeline,
+       .hqd_load = kgd_hqd_load,
+       .hqd_is_occupies = kgd_hqd_is_occupies,
+       .hqd_destroy = kgd_hqd_destroy,
+};
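
The kfd2kgd table above is the service interface radeon exports to amdkfd: the KFD core drives the GPU only through these callbacks. A hedged sketch of a call through the table (the table layout is from this file; the caller itself is illustrative):

/* Illustrative consumer of the table, not part of the patch. */
static uint64_t query_vram(const struct kfd2kgd_calls *f2g,
                           struct kgd_dev *kgd)
{
        return f2g->get_vmem_size(kgd);
}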
+
+static const struct kgd2kfd_calls *kgd2kfd;
+
+bool radeon_kfd_init(void)
+{
+       bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
+                               const struct kgd2kfd_calls**);
+
+       kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+       if (kgd2kfd_init_p == NULL)
+               return false;
+
+       if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+               symbol_put(kgd2kfd_init);
+               kgd2kfd = NULL;
+
+               return false;
+       }
+
+       return true;
+}
+
+void radeon_kfd_fini(void)
+{
+       if (kgd2kfd) {
+               kgd2kfd->exit();
+               symbol_put(kgd2kfd_init);
+       }
+}
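
radeon_kfd_init() uses the symbol_request()/symbol_put() pair so radeon works with or without amdkfd present: the symbol reference pins the kfd module only while the interface is actually in use. A hedged sketch of the same optional-module pattern ("other_driver_init" is an illustrative exported symbol, not a real one):

extern bool other_driver_init(void);

static bool attach_optional_driver(void)
{
        bool (*init_p)(void);

        init_p = symbol_request(other_driver_init); /* may modprobe the owner */
        if (!init_p)
                return false;

        if (!init_p()) {
                symbol_put(other_driver_init);      /* drop the module ref */
                return false;
        }
        return true;
}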
+
+void radeon_kfd_device_probe(struct radeon_device *rdev)
+{
+       if (kgd2kfd)
+               rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
+}
+
+void radeon_kfd_device_init(struct radeon_device *rdev)
+{
+       if (rdev->kfd) {
+               struct kgd2kfd_shared_resources gpu_resources = {
+                       .compute_vmid_bitmap = 0xFF00,
+
+                       .first_compute_pipe = 1,
+                       .compute_pipe_count = 8 - 1,
+               };
+
+               radeon_doorbell_get_kfd_info(rdev,
+                               &gpu_resources.doorbell_physical_address,
+                               &gpu_resources.doorbell_aperture_size,
+                               &gpu_resources.doorbell_start_offset);
+
+               kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+       }
+}
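
The resource split above hands VMIDs 8-15 to amdkfd (compute_vmid_bitmap = 0xFF00 sets bits 8..15) and seven compute pipes (8 - 1, since pipe 0 of the first MEC stays with the gfx ring); kgd_init_memory() below programs apertures for exactly those eight VMIDs. A small reference decode of the bitmap, illustrative only:

/* Reference decode: bit n of 0xFF00 says whether VMID n is owned by
 * KFD, i.e. VMIDs 8-15 here; VMIDs 0-7 stay with the gfx driver. */
static bool vmid_owned_by_kfd(unsigned int vmid)
{
        return (0xFF00u >> vmid) & 1;
}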
+
+void radeon_kfd_device_fini(struct radeon_device *rdev)
+{
+       if (rdev->kfd) {
+               kgd2kfd->device_exit(rdev->kfd);
+               rdev->kfd = NULL;
+       }
+}
+
+void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
+{
+       if (rdev->kfd)
+               kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void radeon_kfd_suspend(struct radeon_device *rdev)
+{
+       if (rdev->kfd)
+               kgd2kfd->suspend(rdev->kfd);
+}
+
+int radeon_kfd_resume(struct radeon_device *rdev)
+{
+       int r = 0;
+
+       if (rdev->kfd)
+               r = kgd2kfd->resume(rdev->kfd);
+
+       return r;
+}
+
+static u32 pool_to_domain(enum kgd_memory_pool p)
+{
+       switch (p) {
+       case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
+       default: return RADEON_GEM_DOMAIN_GTT;
+       }
+}
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+       int r;
+
+       BUG_ON(kgd == NULL);
+
+       r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
+                                     size,
+                                     RADEON_GPU_PAGE_SIZE,
+                                     RADEON_GEM_DOMAIN_GTT,
+                                     RADEON_GEM_GTT_WC);
+
+       if (r)
+               return r;
+
+       r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
+       if (r)
+               radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+
+       return r;
+}
+
+static void fini_sa_manager(struct kgd_dev *kgd)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+       BUG_ON(kgd == NULL);
+
+       radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
+       radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+}
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+               enum kgd_memory_pool pool, struct kgd_mem **mem)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+       u32 domain;
+       int r;
+
+       BUG_ON(kgd == NULL);
+
+       domain = pool_to_domain(pool);
+       if (domain != RADEON_GEM_DOMAIN_GTT) {
+               dev_err(rdev->dev,
+                       "Only allowed to allocate gart memory for kfd\n");
+               return -EINVAL;
+       }
+
+       *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+       if ((*mem) == NULL)
+               return -ENOMEM;
+
+       r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
+                               alignment);
+       if (r) {
+               dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
+               kfree(*mem);
+               return r;
+       }
+
+       (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
+       (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
+
+       return 0;
+}
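
allocate_mem()/free_mem() hand out sub-allocations from the single kfd_bo suballocator set up in init_sa_manager(), so all of KFD's kernel-mode buffers live in one GTT BO. A hedged usage sketch (the caller and sizes are illustrative; the pool name is from kgd_kfd_interface.h):

/* Illustrative caller: carve one page out of the KFD suballocator. */
static int example_alloc(struct kgd_dev *kgd)
{
        struct kgd_mem *mem;
        int r;

        r = allocate_mem(kgd, 4096, 4096, KGD_POOL_SYSTEM_WRITECOMBINE, &mem);
        if (r)
                return r;

        /* mem->ptr is the CPU address, mem->gpu_addr the GPU address */
        free_mem(kgd, mem);
        return 0;
}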
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+       BUG_ON(kgd == NULL);
+
+       radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
+       kfree(mem);
+}
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+       BUG_ON(kgd == NULL);
+
+       return rdev->mc.real_vram_size;
+}
+
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+       return rdev->asic->get_gpu_clock_counter(rdev);
+}
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+       struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+       /* The sclk is in quanta of 10 kHz */
+       return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
+
+static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
+{
+       return (struct radeon_device *)kgd;
+}
+
+static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
+{
+       struct radeon_device *rdev = get_radeon_device(kgd);
+
+       writel(value, (void __iomem *)(rdev->rmmio + offset));
+}
+
+static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
+{
+       struct radeon_device *rdev = get_radeon_device(kgd);
+
+       return readl((void __iomem *)(rdev->rmmio + offset));
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+                       uint32_t queue, uint32_t vmid)
+{
+       struct radeon_device *rdev = get_radeon_device(kgd);
+       uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+       mutex_lock(&rdev->srbm_mutex);
+       write_register(kgd, SRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+       struct radeon_device *rdev = get_radeon_device(kgd);
+
+       write_register(kgd, SRBM_GFX_CNTL, 0);
+       mutex_unlock(&rdev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+       unlock_srbm(kgd);
+}
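
acquire_queue() converts a KFD pipe index into hardware (MEC, pipe) coordinates: pipe_id is pre-incremented because HW pipe 0 of the first MEC is reserved for the gfx ring (matching first_compute_pipe = 1 above), then split by CIK_PIPE_PER_MEC. Worked examples: pipe_id 0 gives mec = 1/4 + 1 = 1 and pipe = 1 % 4 = 1; pipe_id 3 gives mec = 4/4 + 1 = 2 and pipe = 4 % 4 = 0, i.e. the first pipe of the second MEC.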
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+                                       uint32_t sh_mem_config,
+                                       uint32_t sh_mem_ape1_base,
+                                       uint32_t sh_mem_ape1_limit,
+                                       uint32_t sh_mem_bases)
+{
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+       write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
+       write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+       write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+       unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid)
+{
+       /*
+        * We have to assume that there is no outstanding mapping.
+        * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
+        * because a mapping is in progress or because a mapping finished and
+        * the SW cleared it.
+        * So the protocol is to always wait & clear.
+        */
+       uint32_t pasid_mapping = (pasid == 0) ? 0 :
+                               (uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
+
+       write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
+                       pasid_mapping);
+
+       while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
+                                                               (1U << vmid)))
+               cpu_relax();
+       write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+       return 0;
+}
+
+static int kgd_init_memory(struct kgd_dev *kgd)
+{
+       /*
+        * Configure apertures:
+        * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
+        * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
+        * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
+        */
+       int i;
+       uint32_t sh_mem_bases = PRIVATE_BASE(0x6000) | SHARED_BASE(0x6000);
+
+       for (i = 8; i < 16; i++) {
+               uint32_t sh_mem_config;
+
+               lock_srbm(kgd, 0, 0, 0, i);
+
+               sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+               sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
+
+               write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+
+               write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+               /* Scratch aperture is not supported for now. */
+               write_register(kgd, SH_STATIC_MEM_CONFIG, 0);
+
+               /* APE1 disabled for now. */
+               write_register(kgd, SH_MEM_APE1_BASE, 1);
+               write_register(kgd, SH_MEM_APE1_LIMIT, 0);
+
+               unlock_srbm(kgd);
+       }
+
+       return 0;
+}
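
The 0x6000 passed to PRIVATE_BASE()/SHARED_BASE() is the top 16 bits of the 64-bit aperture addresses listed in the comment above: shifted up by 48 bits it lands exactly at the start of the LDS aperture. A one-line reference calculation, illustrative only:

u64 lds_base = (u64)0x6000 << 48;       /* == 0x60000000'00000000 */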
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, 0, 0);
+       write_register(kgd, CP_HPD_EOP_BASE_ADDR,
+                       lower_32_bits(hpd_gpu_addr >> 8));
+       write_register(kgd, CP_HPD_EOP_BASE_ADDR_HI,
+                       upper_32_bits(hpd_gpu_addr >> 8));
+       write_register(kgd, CP_HPD_EOP_VMID, 0);
+       write_register(kgd, CP_HPD_EOP_CONTROL, hpd_size);
+       unlock_srbm(kgd);
+
+       return 0;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+       return (struct cik_mqd *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr)
+{
+       uint32_t wptr_shadow, is_wptr_shadow_valid;
+       struct cik_mqd *m;
+
+       m = get_mqd(mqd);
+
+       is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+       write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+       write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
+
+       write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+       write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+       write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+       write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+       write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+       write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+       write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+       write_register(kgd, CP_HQD_PERSISTENT_STATE,
+                       m->cp_hqd_persistent_state);
+       write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+       write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+       write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
+                       m->cp_hqd_atomic0_preop_lo);
+
+       write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
+                       m->cp_hqd_atomic0_preop_hi);
+
+       write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
+                       m->cp_hqd_atomic1_preop_lo);
+
+       write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
+                       m->cp_hqd_atomic1_preop_hi);
+
+       write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
+                       m->cp_hqd_pq_rptr_report_addr_lo);
+
+       write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+                       m->cp_hqd_pq_rptr_report_addr_hi);
+
+       write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+       write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
+                       m->cp_hqd_pq_wptr_poll_addr_lo);
+
+       write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
+                       m->cp_hqd_pq_wptr_poll_addr_hi);
+
+       write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
+                       m->cp_hqd_pq_doorbell_control);
+
+       write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
+
+       write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+       write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+       write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+       write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+       if (is_wptr_shadow_valid)
+               write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
+
+       write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
+       release_queue(kgd);
+
+       return 0;
+}
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id)
+{
+       uint32_t act;
+       bool retval = false;
+       uint32_t low, high;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       act = read_register(kgd, CP_HQD_ACTIVE);
+       if (act) {
+               low = lower_32_bits(queue_address >> 8);
+               high = upper_32_bits(queue_address >> 8);
+
+               if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
+                               high == read_register(kgd, CP_HQD_PQ_BASE_HI))
+                       retval = true;
+       }
+       release_queue(kgd);
+       return retval;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       uint32_t temp;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+       write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
+
+       while (true) {
+               temp = read_register(kgd, CP_HQD_ACTIVE);
+               if (temp & 0x1)
+                       break;
+               if (timeout == 0) {
+                       pr_err("kfd: cp queue preemption timed out\n");
+                       release_queue(kgd);
+                       return -ETIME;
+               }
+               msleep(20);
+               timeout -= 20;
+       }
+
+       release_queue(kgd);
+       return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
new file mode 100644 (file)
index 0000000..f90e161
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * radeon_kfd.h defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef RADEON_KFD_H_INCLUDED
+#define RADEON_KFD_H_INCLUDED
+
+#include <linux/types.h>
+#include "../amd/include/kgd_kfd_interface.h"
+
+struct radeon_device;
+
+bool radeon_kfd_init(void);
+void radeon_kfd_fini(void);
+
+void radeon_kfd_suspend(struct radeon_device *rdev);
+int radeon_kfd_resume(struct radeon_device *rdev);
+void radeon_kfd_interrupt(struct radeon_device *rdev,
+                       const void *ih_ring_entry);
+void radeon_kfd_device_probe(struct radeon_device *rdev);
+void radeon_kfd_device_init(struct radeon_device *rdev);
+void radeon_kfd_device_fini(struct radeon_device *rdev);
+
+#endif /* RADEON_KFD_H_INCLUDED */
index 8309b11e674d8506bfeb3f05530407ede4b60642..3cf9c1fa64756fb6b4430d5e351f21aee874ad1d 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 
+#include "radeon_kfd.h"
+
 #if defined(CONFIG_VGA_SWITCHEROO)
 bool radeon_has_atpx(void);
 #else
@@ -63,6 +65,8 @@ int radeon_driver_unload_kms(struct drm_device *dev)
 
        pm_runtime_get_sync(dev->dev);
 
+       radeon_kfd_device_fini(rdev);
+
        radeon_acpi_fini(rdev);
        
        radeon_modeset_fini(rdev);
@@ -142,6 +146,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
                                "Error during ACPI methods call\n");
        }
 
+       radeon_kfd_device_probe(rdev);
+       radeon_kfd_device_init(rdev);
+
        if (radeon_is_px(dev)) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -621,8 +628,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                                                  RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
-
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
@@ -795,6 +800,9 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
        /* Get associated drm_crtc: */
-       drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+       if (rdev->mode_info.crtcs[crtc] == NULL)
+               return -EINVAL;
+       drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
 
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
index cafb1ccf2ec3213b2dc394314dbece153d88ef00..678b4386540d52ea22a5f2cb3b9e583f87232cfe 100644 (file)
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
                        DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
                }
        }
+       radeon_cursor_reset(crtc);
        return 0;
 }
 
index 04db2fdd869271f81482e75913d6ec4002ffecac..390db897f322b48b2cd31399e0fddf5264535780 100644 (file)
@@ -321,6 +321,10 @@ struct radeon_crtc {
        uint32_t crtc_offset;
        struct drm_gem_object *cursor_bo;
        uint64_t cursor_addr;
+       int cursor_x;
+       int cursor_y;
+       int cursor_hot_x;
+       int cursor_hot_y;
        int cursor_width;
        int cursor_height;
        int max_cursor_width;
@@ -462,6 +466,7 @@ struct radeon_gpio_rec {
        u8 id;
        u32 reg;
        u32 mask;
+       u32 shift;
 };
 
 struct radeon_hpd {
@@ -748,6 +753,8 @@ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
 extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                             struct radeon_atom_ss *ss,
                                             int id, u32 clock);
+extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+                                                         u8 id);
 
 extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
                                      uint64_t freq,
@@ -802,13 +809,16 @@ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
 extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
                                   struct drm_framebuffer *fb,
                                   int x, int y, int atomic);
-extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
-                                 struct drm_file *file_priv,
-                                 uint32_t handle,
-                                 uint32_t width,
-                                 uint32_t height);
+extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+                                  struct drm_file *file_priv,
+                                  uint32_t handle,
+                                  uint32_t width,
+                                  uint32_t height,
+                                  int32_t hot_x,
+                                  int32_t hot_y);
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      unsigned int flags,
index 99a960a4f30265c893a411763843a260d38e6a41..7d68223eb4692a026fbad21b68bbe9362ee6d87a 100644 (file)
@@ -99,22 +99,39 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
-       if (domain & RADEON_GEM_DOMAIN_VRAM)
+       if (domain & RADEON_GEM_DOMAIN_VRAM) {
+               /* Try placing BOs which don't need CPU access outside of the
+                * CPU accessible part of VRAM
+                */
+               if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+                   rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+                       rbo->placements[c].fpfn =
+                               rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+                                                    TTM_PL_FLAG_UNCACHED |
+                                                    TTM_PL_FLAG_VRAM;
+               }
+
+               rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
+       }
 
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
 
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
@@ -122,30 +139,35 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
 
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                    rbo->rdev->flags & RADEON_IS_AGP) {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
+                       rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
-       if (!c)
+       if (!c) {
+               rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
+       }
 
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
 
        for (i = 0; i < c; ++i) {
-               rbo->placements[i].fpfn = 0;
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
-                   (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
+                   (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+                   !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
@@ -157,9 +179,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
         * improve fragmentation quality.
         * 512kb was measured as the most optimal number.
         */
-       if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
-             (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
-           rbo->tbo.mem.size > 512 * 1024) {
+       if (rbo->tbo.mem.size > 512 * 1024) {
                for (i = 0; i < c; i++) {
                        rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
                }
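
For readers new to TTM: each entry in rbo->placements[] is a struct ttm_place whose fpfn/lpfn fields bound that placement to a page-frame range (lpfn == 0 means no upper cap). The hunks above set fpfn per placement so that no-CPU-access BOs prefer VRAM above visible_vram_size, while CPU-accessed BOs are capped below it via lpfn. A hedged sketch of the two bounds (the helper name is illustrative, assuming this kernel's ttm_place layout):

/* Illustrative: restrict one placement to [first_pfn, last_pfn].
 * fpfn/lpfn are inclusive page-frame bounds; lpfn == 0 means unbounded. */
static void bound_placement(struct ttm_place *p,
                            unsigned int first_pfn, unsigned int last_pfn)
{
        p->fpfn = first_pfn;
        p->lpfn = last_pfn;
}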
@@ -213,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev,
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+#ifdef CONFIG_X86_32
+       /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+        * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+        */
+       bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
@@ -482,25 +509,29 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
 {
-       struct radeon_cs_reloc *lobj;
-       struct radeon_bo *bo;
+       struct radeon_bo_list *lobj;
+       struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-       r = ttm_eu_reserve_buffers(ticket, head, true);
+       INIT_LIST_HEAD(&duplicates);
+       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }
 
        list_for_each_entry(lobj, head, tv.head) {
-               bo = lobj->robj;
+               struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
+                       WARN_ONCE(bo->gem_base.dumb,
+                                 "GPU use of dumb buffer is illegal.\n");
+
                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
@@ -539,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
+
+       list_for_each_entry(lobj, &duplicates, tv.head) {
+               lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+               lobj->tiling_flags = lobj->robj->tiling_flags;
+       }
+
        return 0;
 }
 
@@ -743,8 +780,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
-       unsigned long offset, size;
-       int r;
+       unsigned long offset, size, lpfn;
+       int i, r;
 
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
@@ -761,7 +798,13 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-       rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       lpfn =  rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       for (i = 0; i < rbo->placement.num_placement; i++) {
+               /* Force into visible VRAM */
+               if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+                   (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+                       rbo->placements[i].lpfn = lpfn;
+       }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -792,3 +835,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
        ttm_bo_unreserve(&bo->tbo);
        return r;
 }
+
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+                     bool shared)
+{
+       struct reservation_object *resv = bo->tbo.resv;
+
+       if (shared)
+               reservation_object_add_shared_fence(resv, &fence->base);
+       else
+               reservation_object_add_excl_fence(resv, &fence->base);
+}
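
radeon_bo_fence() attaches a command-submission fence to the BO's reservation object, either as one of the shared (reader) fences or as the exclusive (writer) fence that all later work must wait on. A hedged usage fragment, with bo and fence assumed to come from the surrounding CS code:

/* Illustrative fragment, not part of the patch: */
radeon_bo_fence(bo, fence, true);       /* GPU read: shared fence     */
radeon_bo_fence(bo, fence, false);      /* GPU write: exclusive fence */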
index 1b8ec7917154809e10336fb346c6aacbafa72939..3b0b377f76cb08c2aa6124896818c6567ff52db4 100644 (file)
@@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *new_mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+                           bool shared);
 
 /*
  * sub allocation
index 6deb08f045b758a9cbaefec886177b86c950a567..e6ad54cdfa6279284771d4df897f7860b0ae06c6 100644 (file)
 int radeon_semaphore_create(struct radeon_device *rdev,
                            struct radeon_semaphore **semaphore)
 {
-       uint64_t *cpu_addr;
-       int i, r;
+       int r;
 
        *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
        if (*semaphore == NULL) {
                return -ENOMEM;
        }
-       r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
-                            8 * RADEON_NUM_SYNCS, 8);
+       r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+                            &(*semaphore)->sa_bo, 8, 8);
        if (r) {
                kfree(*semaphore);
                *semaphore = NULL;
@@ -51,12 +50,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
        (*semaphore)->waiters = 0;
        (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
 
-       cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
-       for (i = 0; i < RADEON_NUM_SYNCS; ++i)
-               cpu_addr[i] = 0;
-
-       for (i = 0; i < RADEON_NUM_RINGS; ++i)
-               (*semaphore)->sync_to[i] = NULL;
+       *((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
 
        return 0;
 }
@@ -95,146 +89,6 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
        return false;
 }
 
-/**
- * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
- *
- * @semaphore: semaphore object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence using this semaphore object
- */
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
-                                struct radeon_fence *fence)
-{
-        struct radeon_fence *other;
-
-        if (!fence)
-                return;
-
-        other = semaphore->sync_to[fence->ring];
-        semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
- * radeon_semaphore_sync_to - use the semaphore to sync to a reservation object
- *
- * @sema: semaphore object to add fence from reservation object to
- * @resv: reservation object with embedded fence
- * @shared: true if we should onyl sync to the exclusive fence
- *
- * Sync to the fence using this semaphore object
- */
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
-                              struct radeon_semaphore *sema,
-                              struct reservation_object *resv,
-                              bool shared)
-{
-       struct reservation_object_list *flist;
-       struct fence *f;
-       struct radeon_fence *fence;
-       unsigned i;
-       int r = 0;
-
-       /* always sync to the exclusive fence */
-       f = reservation_object_get_excl(resv);
-       fence = f ? to_radeon_fence(f) : NULL;
-       if (fence && fence->rdev == rdev)
-               radeon_semaphore_sync_fence(sema, fence);
-       else if (f)
-               r = fence_wait(f, true);
-
-       flist = reservation_object_get_list(resv);
-       if (shared || !flist || r)
-               return r;
-
-       for (i = 0; i < flist->shared_count; ++i) {
-               f = rcu_dereference_protected(flist->shared[i],
-                                             reservation_object_held(resv));
-               fence = to_radeon_fence(f);
-               if (fence && fence->rdev == rdev)
-                       radeon_semaphore_sync_fence(sema, fence);
-               else
-                       r = fence_wait(f, true);
-
-               if (r)
-                       break;
-       }
-       return r;
-}
-
-/**
- * radeon_semaphore_sync_rings - sync ring to all registered fences
- *
- * @rdev: radeon_device pointer
- * @semaphore: semaphore object to use for sync
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
-                               struct radeon_semaphore *semaphore,
-                               int ring)
-{
-       unsigned count = 0;
-       int i, r;
-
-        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-               struct radeon_fence *fence = semaphore->sync_to[i];
-
-               /* check if we really need to sync */
-                if (!radeon_fence_need_sync(fence, ring))
-                       continue;
-
-               /* prevent GPU deadlocks */
-               if (!rdev->ring[i].ready) {
-                       dev_err(rdev->dev, "Syncing to a disabled ring!");
-                       return -EINVAL;
-               }
-
-               if (++count > RADEON_NUM_SYNCS) {
-                       /* not enough room, wait manually */
-                       r = radeon_fence_wait(fence, false);
-                       if (r)
-                               return r;
-                       continue;
-               }
-
-               /* allocate enough space for sync command */
-               r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
-               if (r) {
-                       return r;
-               }
-
-               /* emit the signal semaphore */
-               if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
-                       /* signaling wasn't successful wait manually */
-                       radeon_ring_undo(&rdev->ring[i]);
-                       r = radeon_fence_wait(fence, false);
-                       if (r)
-                               return r;
-                       continue;
-               }
-
-               /* we assume caller has already allocated space on waiters ring */
-               if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
-                       /* waiting wasn't successful wait manually */
-                       radeon_ring_undo(&rdev->ring[i]);
-                       r = radeon_fence_wait(fence, false);
-                       if (r)
-                               return r;
-                       continue;
-               }
-
-               radeon_ring_commit(rdev, &rdev->ring[i], false);
-               radeon_fence_note_sync(fence, ring);
-
-               semaphore->gpu_addr += 8;
-       }
-
-       return 0;
-}
-
 void radeon_semaphore_free(struct radeon_device *rdev,
                           struct radeon_semaphore **semaphore,
                           struct radeon_fence *fence)
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
new file mode 100644 (file)
index 0000000..02ac8a1
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/**
+ * radeon_sync_create - zero init sync object
+ *
+ * @sync: sync object to initialize
+ *
+ * Just clear the sync object for now.
+ */
+void radeon_sync_create(struct radeon_sync *sync)
+{
+       unsigned i;
+
+       for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+               sync->semaphores[i] = NULL;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               sync->sync_to[i] = NULL;
+
+       sync->last_vm_update = NULL;
+}
+
+/**
+ * radeon_sync_fence - use the semaphore to sync to a fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using the semaphore objects
+ */
+void radeon_sync_fence(struct radeon_sync *sync,
+                      struct radeon_fence *fence)
+{
+       struct radeon_fence *other;
+
+       if (!fence)
+               return;
+
+       other = sync->sync_to[fence->ring];
+       sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
+
+       if (fence->is_vm_update) {
+               other = sync->last_vm_update;
+               sync->last_vm_update = radeon_fence_later(fence, other);
+       }
+}
+
+/**
+ * radeon_sync_resv - use the semaphores to sync to a reservation object
+ *
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fence
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to the fence using the semaphore objects
+ */
+int radeon_sync_resv(struct radeon_device *rdev,
+                    struct radeon_sync *sync,
+                    struct reservation_object *resv,
+                    bool shared)
+{
+       struct reservation_object_list *flist;
+       struct fence *f;
+       struct radeon_fence *fence;
+       unsigned i;
+       int r = 0;
+
+       /* always sync to the exclusive fence */
+       f = reservation_object_get_excl(resv);
+       fence = f ? to_radeon_fence(f) : NULL;
+       if (fence && fence->rdev == rdev)
+               radeon_sync_fence(sync, fence);
+       else if (f)
+               r = fence_wait(f, true);
+
+       flist = reservation_object_get_list(resv);
+       if (shared || !flist || r)
+               return r;
+
+       for (i = 0; i < flist->shared_count; ++i) {
+               f = rcu_dereference_protected(flist->shared[i],
+                                             reservation_object_held(resv));
+               fence = to_radeon_fence(f);
+               if (fence && fence->rdev == rdev)
+                       radeon_sync_fence(sync, fence);
+               else
+                       r = fence_wait(f, true);
+
+               if (r)
+                       break;
+       }
+       return r;
+}
+
+/**
+ * radeon_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
+int radeon_sync_rings(struct radeon_device *rdev,
+                     struct radeon_sync *sync,
+                     int ring)
+{
+       unsigned count = 0;
+       int i, r;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               struct radeon_fence *fence = sync->sync_to[i];
+               struct radeon_semaphore *semaphore;
+
+               /* check if we really need to sync */
+               if (!radeon_fence_need_sync(fence, ring))
+                       continue;
+
+               /* prevent GPU deadlocks */
+               if (!rdev->ring[i].ready) {
+                       dev_err(rdev->dev, "Syncing to a disabled ring!\n");
+                       return -EINVAL;
+               }
+
+               if (count >= RADEON_NUM_SYNCS) {
+                       /* not enough room, wait manually */
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
+                       continue;
+               }
+               r = radeon_semaphore_create(rdev, &semaphore);
+               if (r)
+                       return r;
+
+               sync->semaphores[count++] = semaphore;
+
+               /* allocate enough space for sync command */
+               r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+               if (r)
+                       return r;
+
+               /* emit the signal semaphore */
+               if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+                       /* signaling wasn't successful, wait manually */
+                       radeon_ring_undo(&rdev->ring[i]);
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
+                       continue;
+               }
+
+               /* we assume caller has already allocated space on waiters ring */
+               if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+                       /* waiting wasn't successful, wait manually */
+                       radeon_ring_undo(&rdev->ring[i]);
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
+                       continue;
+               }
+
+               radeon_ring_commit(rdev, &rdev->ring[i], false);
+               radeon_fence_note_sync(fence, ring);
+       }
+
+       return 0;
+}
+
+/**
+ * radeon_sync_free - free the sync object
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @fence: fence to use for the free
+ *
+ * Free the sync object by freeing all semaphores in it.
+ */
+void radeon_sync_free(struct radeon_device *rdev,
+                     struct radeon_sync *sync,
+                     struct radeon_fence *fence)
+{
+       unsigned i;
+
+       for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+               radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
+}
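
Taken together, radeon_sync_* replaces the fixed sync_to[] bookkeeping removed from radeon_semaphore.c above: a sync object gathers the newest fence per ring (plus the newest VM-update fence), and radeon_sync_rings() then creates one semaphore per ring that actually needs a wait. A hedged sketch of the lifecycle around a command submission (error handling trimmed; rdev, resv, ring and the resulting fence are assumed inputs from the surrounding CS code):

/* Illustrative lifecycle, not part of the patch. */
static int example_submit(struct radeon_device *rdev,
                          struct reservation_object *resv, int ring,
                          struct radeon_fence *fence)
{
        struct radeon_sync sync;
        int r;

        radeon_sync_create(&sync);                      /* zero-init        */
        r = radeon_sync_resv(rdev, &sync, resv, false); /* gather BO fences */
        if (!r)
                r = radeon_sync_rings(rdev, &sync, ring); /* emit waits     */
        /* ... submit the IB, which produces "fence" ... */
        radeon_sync_free(rdev, &sync, fence);           /* free semaphores  */
        return r;
}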
index 9db74a96ef617d7d66292023d8250ca3821e236a..ce075cb08cb2b7b618a78c58d74915f06df3c8ff 100644 (file)
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
 
            TP_fast_assign(
                           __entry->ring = p->ring;
-                          __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+                          __entry->dw = p->chunk_ib->length_dw;
                           __entry->fences = radeon_fence_count_emitted(
                                p->rdev, p->ring);
                           ),
index 8624979afb65f55ecba4ffb888aa09d0b3c8b39d..d02aa1d0f5885408c877056bd4ac1ab0e1ed6f12 100644 (file)
@@ -196,9 +196,32 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+               if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
-               else
+               else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
+                        bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+                       unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       int i;
+
+                       /* Try evicting to the CPU inaccessible part of VRAM
+                        * first, but only set GTT as busy placement, so this
+                        * BO will be evicted to GTT rather than causing other
+                        * BOs to be evicted from VRAM
+                        */
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
+                                                        RADEON_GEM_DOMAIN_GTT);
+                       rbo->placement.num_busy_placement = 0;
+                       for (i = 0; i < rbo->placement.num_placement; i++) {
+                               if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+                                       if (rbo->placements[i].fpfn < fpfn)
+                                               rbo->placements[i].fpfn = fpfn;
+                               } else {
+                                       rbo->placement.busy_placement =
+                                               &rbo->placements[i];
+                                       rbo->placement.num_busy_placement = 1;
+                               }
+                       }
+               } else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
index 11b6624692536c13dbea4cc4e9b4c75b46902d1d..c10b2aec6450fa8ceb366a691ea0898aff14dcbc 100644 (file)
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               unsigned buf_sizes[], bool *has_msg_cmd)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        unsigned idx, cmd, offset;
        uint64_t start, end;
        int r;
 
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        offset = radeon_get_ib_value(p, data0);
        idx = radeon_get_ib_value(p, data1);
        if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       reloc = p->relocs_ptr[(idx / 4)];
+       reloc = &p->relocs[(idx / 4)];
        start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                [0x00000003]    =       2048,
        };
 
-       if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+       if (p->chunk_ib->length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
-                         p->chunks[p->chunk_ib_idx].length_dw);
+                         p->chunk_ib->length_dw);
                return -EINVAL;
        }
 
-       if (p->chunk_relocs_idx == -1) {
+       if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
-       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       } while (p->idx < p->chunk_ib->length_dw);
 
        if (!has_msg_cmd) {
                DRM_ERROR("UVD-IBs need a msg command!\n");
index 9e85757d55991a6cd0e077cada024d87f0484c08..976fe432f4e26fe83682d655ea983817cc19f05f 100644 (file)
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
                        unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       struct radeon_cs_reloc *reloc;
+       struct radeon_bo_list *reloc;
        uint64_t start, end, offset;
        unsigned idx;
 
-       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       relocs_chunk = p->chunk_relocs;
        offset = radeon_get_ib_value(p, lo);
        idx = radeon_get_ib_value(p, hi);
 
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
                return -EINVAL;
        }
 
-       reloc = p->relocs_ptr[(idx / 4)];
+       reloc = &p->relocs[(idx / 4)];
        start = reloc->gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
        uint32_t *size = &tmp;
        int i, r;
 
-       while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+       while (p->idx < p->chunk_ib->length_dw) {
                uint32_t len = radeon_get_ib_value(p, p->idx);
                uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
 
index dfde266529e2a787fd953a85bb12c4673e448d37..cde48c42b30ad4b63c27dde6741f8637840670fc 100644 (file)
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  * Add the page directory to the list of BOs to
  * validate for command submission (cayman+).
  */
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct radeon_vm *vm,
                                          struct list_head *head)
 {
-       struct radeon_cs_reloc *list;
+       struct radeon_bo_list *list;
        unsigned i, idx;
 
        list = drm_malloc_ab(vm->max_pde_used + 2,
-                            sizeof(struct radeon_cs_reloc));
+                            sizeof(struct radeon_bo_list));
        if (!list)
                return NULL;
 
        /* add the vm page table to the list */
-       list[0].gobj = NULL;
        list[0].robj = vm->page_directory;
        list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
-       list[0].tv.shared = false;
+       list[0].tv.shared = true;
        list[0].tiling_flags = 0;
-       list[0].handle = 0;
        list_add(&list[0].tv.head, head);
 
        for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i].bo)
                        continue;
 
-               list[idx].gobj = NULL;
                list[idx].robj = vm->page_tables[i].bo;
                list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
-               list[idx].tv.shared = false;
+               list[idx].tv.shared = true;
                list[idx].tiling_flags = 0;
-               list[idx].handle = 0;
                list_add(&list[idx++].tv.head, head);
        }
 
@@ -182,15 +178,18 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
 {
        struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+       struct radeon_vm_id *vm_id = &vm->ids[ring];
+
        unsigned choices[2] = {};
        unsigned i;
 
        /* check if the id is still valid */
-       if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
+       if (vm_id->id && vm_id->last_id_use &&
+           vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
                return NULL;
 
        /* we definitely need to flush */
-       radeon_fence_unref(&vm->last_flush);
+       vm_id->pd_gpu_addr = ~0ll;
 
        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < rdev->vm_manager.nvm; ++i) {
@@ -198,8 +197,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 
                if (fence == NULL) {
                        /* found a free one */
-                       vm->id = i;
-                       trace_radeon_vm_grab_id(vm->id, ring);
+                       vm_id->id = i;
+                       trace_radeon_vm_grab_id(i, ring);
                        return NULL;
                }
 
@@ -211,8 +210,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 
        for (i = 0; i < 2; ++i) {
                if (choices[i]) {
-                       vm->id = choices[i];
-                       trace_radeon_vm_grab_id(vm->id, ring);
+                       vm_id->id = choices[i];
+                       trace_radeon_vm_grab_id(choices[i], ring);
                        return rdev->vm_manager.active[choices[i]];
                }
        }
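Note: radeon_vm_grab_id() now tracks one VMID per ring through vm->ids[ring]. A sketch of the per-ring state implied by the accesses above (field names are taken from this diff; the real definition is in radeon.h):

	struct radeon_vm_id {
		unsigned		id;			/* hardware VMID, 0 = none grabbed yet */
		uint64_t		pd_gpu_addr;		/* PD address last flushed; ~0 forces a flush */
		struct radeon_fence	*flushed_updates;	/* last VM update flushed on this ring */
		struct radeon_fence	*last_id_use;		/* last use of this VMID */
	};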
@@ -228,6 +227,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @vm: vm we want to flush
  * @ring: ring to use for flush
+ * @updates: last vm update that is waited for
  *
  * Flush the vm (cayman+).
  *
@@ -235,15 +235,21 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
  */
 void radeon_vm_flush(struct radeon_device *rdev,
                     struct radeon_vm *vm,
-                    int ring)
+                    int ring, struct radeon_fence *updates)
 {
        uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+       struct radeon_vm_id *vm_id = &vm->ids[ring];
+
+       if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
+           radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
+
+               trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
+               radeon_fence_unref(&vm_id->flushed_updates);
+               vm_id->flushed_updates = radeon_fence_ref(updates);
+               vm_id->pd_gpu_addr = pd_addr;
+               radeon_ring_vm_flush(rdev, &rdev->ring[ring],
+                                    vm_id->id, vm_id->pd_gpu_addr);
 
-       /* if we can't remember our last VM flush then flush now! */
-       if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
-               trace_radeon_vm_flush(pd_addr, ring, vm->id);
-               vm->pd_gpu_addr = pd_addr;
-               radeon_ring_vm_flush(rdev, ring, vm);
        }
 }
 
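Note: the flush is now conditional: it only runs if the page directory moved, nothing was flushed on this ring yet, or newer page-table updates exist. Restated as a standalone predicate (a hypothetical helper, not part of this patch):

	static bool radeon_vm_needs_flush(struct radeon_vm_id *vm_id,
					  uint64_t pd_addr,
					  struct radeon_fence *updates)
	{
		return pd_addr != vm_id->pd_gpu_addr ||		/* page directory moved */
		       !vm_id->flushed_updates ||		/* never flushed on this ring */
		       radeon_fence_is_earlier(vm_id->flushed_updates, updates);
	}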
@@ -263,18 +269,13 @@ void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
 {
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(fence);
-
-       radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-       rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+       unsigned vm_id = vm->ids[fence->ring].id;
 
-       radeon_fence_unref(&vm->last_id_use);
-       vm->last_id_use = radeon_fence_ref(fence);
+       radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
+       rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
 
-        /* we just flushed the VM, remember that */
-        if (!vm->last_flush)
-                vm->last_flush = radeon_fence_ref(fence);
+       radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
+       vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
 }
 
 /**
@@ -387,35 +388,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
 static int radeon_vm_clear_bo(struct radeon_device *rdev,
                              struct radeon_bo *bo)
 {
-        struct ttm_validate_buffer tv;
-        struct ww_acquire_ctx ticket;
-        struct list_head head;
        struct radeon_ib ib;
        unsigned entries;
        uint64_t addr;
        int r;
 
-        memset(&tv, 0, sizeof(tv));
-        tv.bo = &bo->tbo;
-       tv.shared = false;
-
-        INIT_LIST_HEAD(&head);
-        list_add(&tv.head, &head);
-
-        r = ttm_eu_reserve_buffers(&ticket, &head, true);
-        if (r)
+       r = radeon_bo_reserve(bo, false);
+       if (r)
                return r;
 
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-        if (r)
-                goto error;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       if (r)
+               goto error_unreserve;
 
        addr = radeon_bo_gpu_offset(bo);
        entries = radeon_bo_size(bo) / 8;
 
        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
        if (r)
-                goto error;
+               goto error_unreserve;
 
        ib.length_dw = 0;
 
@@ -425,15 +416,16 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
        r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r)
-                goto error;
+               goto error_free;
 
-       ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
-       radeon_ib_free(rdev, &ib);
+       ib.fence->is_vm_update = true;
+       radeon_bo_fence(bo, ib.fence, false);
 
-       return 0;
+error_free:
+       radeon_ib_free(rdev, &ib);
 
-error:
-       ttm_eu_backoff_reservation(&ticket, &head);
+error_unreserve:
+       radeon_bo_unreserve(bo);
        return r;
 }
 
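Note: because only a single BO is involved, the ww_acquire ticket and validation list are unnecessary and radeon_vm_clear_bo() reduces to a plain reserve/validate/submit/fence/unreserve sequence. The shape of that pattern, using only the helpers visible in the hunk above:

	r = radeon_bo_reserve(bo, false);		/* lock just this one BO */
	if (r)
		return r;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (!r) {
		/* ... build the clearing IB and radeon_ib_schedule() it ... */
		radeon_bo_fence(bo, ib.fence, false);	/* attach as exclusive fence */
	}
	radeon_bo_unreserve(bo);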
@@ -449,7 +441,7 @@ error:
  * Validate and set the offset requested within the vm address space.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved!
+ * Object has to be reserved and gets unreserved by this function!
  */
 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
@@ -495,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        tmp->vm = vm;
                        tmp->addr = bo_va->addr;
                        tmp->bo = radeon_bo_ref(bo_va->bo);
+                       spin_lock(&vm->status_lock);
                        list_add(&tmp->vm_status, &vm->freed);
+                       spin_unlock(&vm->status_lock);
                }
 
                interval_tree_remove(&bo_va->it, &vm->va);
@@ -575,7 +569,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        }
 
        mutex_unlock(&vm->mutex);
-       return radeon_bo_reserve(bo_va->bo, false);
+       return 0;
 }
 
 /**
@@ -699,17 +693,15 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        if (ib.length_dw != 0) {
                radeon_asic_vm_pad_ib(rdev, &ib);
 
-               radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
-               radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
+               radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
                WARN_ON(ib.length_dw > ndw);
                r = radeon_ib_schedule(rdev, &ib, NULL, false);
                if (r) {
                        radeon_ib_free(rdev, &ib);
                        return r;
                }
-               radeon_fence_unref(&vm->fence);
-               vm->fence = radeon_fence_ref(ib.fence);
-               radeon_fence_unref(&vm->last_flush);
+               ib.fence->is_vm_update = true;
+               radeon_bo_fence(pd, ib.fence, false);
        }
        radeon_ib_free(rdev, &ib);
 
@@ -808,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
  *
  * Global and local mutex must be locked!
  */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-                                 struct radeon_vm *vm,
-                                 struct radeon_ib *ib,
-                                 uint64_t start, uint64_t end,
-                                 uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+                                struct radeon_vm *vm,
+                                struct radeon_ib *ib,
+                                uint64_t start, uint64_t end,
+                                uint64_t dst, uint32_t flags)
 {
        uint64_t mask = RADEON_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
@@ -825,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                unsigned nptes;
                uint64_t pte;
+               int r;
 
-               radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
+               radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+               r = reservation_object_reserve_shared(pt->tbo.resv);
+               if (r)
+                       return r;
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
@@ -860,6 +856,33 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                    last_pte + 8 * count,
                                    last_dst, flags);
        }
+
+       return 0;
+}
+
+/**
+ * radeon_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_fence_pts(struct radeon_vm *vm,
+                               uint64_t start, uint64_t end,
+                               struct radeon_fence *fence)
+{
+       unsigned i;
+
+       start >>= radeon_vm_block_size;
+       end >>= radeon_vm_block_size;
+
+       for (i = start; i <= end; ++i)
+               radeon_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
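Note: radeon_vm_fence_pts() pairs with the reservation_object_reserve_shared() call added to radeon_vm_update_ptes() above: a shared-fence slot is reserved per page table while the PTEs are written, then filled once the IB is scheduled. In sequence (a sketch of the two hunks working together):

	/* in radeon_vm_update_ptes(): make room for a shared fence */
	r = reservation_object_reserve_shared(pt->tbo.resv);
	if (r)
		return r;
	/* ... radeon_ib_schedule(rdev, &ib, NULL, false) ... */
	/* in radeon_vm_fence_pts(): fill the slot, shared = true */
	radeon_bo_fence(pt, ib.fence, true);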
 /**
@@ -892,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                return -EINVAL;
        }
 
+       spin_lock(&vm->status_lock);
        list_del_init(&bo_va->vm_status);
+       spin_unlock(&vm->status_lock);
 
        bo_va->flags &= ~RADEON_VM_PAGE_VALID;
        bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -961,23 +986,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                return r;
        ib.length_dw = 0;
 
-       radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
-                             bo_va->it.last + 1, addr,
-                             radeon_vm_page_flags(bo_va->flags));
+       if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
+               unsigned i;
+
+               for (i = 0; i < RADEON_NUM_RINGS; ++i)
+                       radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
+       }
+
+       r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+                                 bo_va->it.last + 1, addr,
+                                 radeon_vm_page_flags(bo_va->flags));
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
 
        radeon_asic_vm_pad_ib(rdev, &ib);
        WARN_ON(ib.length_dw > ndw);
 
-       radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
        r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                return r;
        }
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(ib.fence);
+       ib.fence->is_vm_update = true;
+       radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
+       radeon_fence_unref(&bo_va->last_pt_update);
+       bo_va->last_pt_update = radeon_fence_ref(ib.fence);
        radeon_ib_free(rdev, &ib);
-       radeon_fence_unref(&vm->last_flush);
 
        return 0;
 }
@@ -996,16 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 int radeon_vm_clear_freed(struct radeon_device *rdev,
                          struct radeon_vm *vm)
 {
-       struct radeon_bo_va *bo_va, *tmp;
+       struct radeon_bo_va *bo_va;
        int r;
 
-       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->freed)) {
+               bo_va = list_first_entry(&vm->freed,
+                       struct radeon_bo_va, vm_status);
+               spin_unlock(&vm->status_lock);
+
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                radeon_bo_unref(&bo_va->bo);
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
                if (r)
                        return r;
+
+               spin_lock(&vm->status_lock);
        }
+       spin_unlock(&vm->status_lock);
        return 0;
 
 }
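Note: the freed list is now drained under vm->status_lock instead of relying on the VM mutex alone. Since radeon_vm_bo_update() can sleep, the spinlock is dropped around each element; the element stays on the list until radeon_vm_bo_update() performs its list_del_init() under the same lock, so concurrent walkers never see a dangling entry. The generic shape of this drain pattern (queue, lock, item_type and process() are placeholders):

	spin_lock(&lock);
	while (!list_empty(&queue)) {
		item = list_first_entry(&queue, struct item_type, node);
		spin_unlock(&lock);
		process(item);		/* may sleep; unlinks item under 'lock' */
		spin_lock(&lock);
	}
	spin_unlock(&lock);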
@@ -1024,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 int radeon_vm_clear_invalids(struct radeon_device *rdev,
                             struct radeon_vm *vm)
 {
-       struct radeon_bo_va *bo_va, *tmp;
+       struct radeon_bo_va *bo_va;
        int r;
 
-       list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->invalidated)) {
+               bo_va = list_first_entry(&vm->invalidated,
+                       struct radeon_bo_va, vm_status);
+               spin_unlock(&vm->status_lock);
+
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                if (r)
                        return r;
+
+               spin_lock(&vm->status_lock);
        }
+       spin_unlock(&vm->status_lock);
+
        return 0;
 }
 
@@ -1054,14 +1108,17 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
 
        mutex_lock(&vm->mutex);
        interval_tree_remove(&bo_va->it, &vm->va);
+       spin_lock(&vm->status_lock);
        list_del(&bo_va->vm_status);
 
        if (bo_va->addr) {
                bo_va->bo = radeon_bo_ref(bo_va->bo);
                list_add(&bo_va->vm_status, &vm->freed);
        } else {
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
        }
+       spin_unlock(&vm->status_lock);
 
        mutex_unlock(&vm->mutex);
 }
@@ -1082,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->addr) {
-                       mutex_lock(&bo_va->vm->mutex);
+                       spin_lock(&bo_va->vm->status_lock);
                        list_del(&bo_va->vm_status);
                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-                       mutex_unlock(&bo_va->vm->mutex);
+                       spin_unlock(&bo_va->vm->status_lock);
                }
        }
 }
@@ -1103,15 +1160,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
                RADEON_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries, pts_size;
-       int r;
+       int i, r;
 
-       vm->id = 0;
        vm->ib_bo_va = NULL;
-       vm->fence = NULL;
-       vm->last_flush = NULL;
-       vm->last_id_use = NULL;
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               vm->ids[i].id = 0;
+               vm->ids[i].flushed_updates = NULL;
+               vm->ids[i].last_id_use = NULL;
+       }
        mutex_init(&vm->mutex);
        vm->va = RB_ROOT;
+       spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->freed);
 
@@ -1165,11 +1224,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                if (!r) {
                        list_del_init(&bo_va->bo_list);
                        radeon_bo_unreserve(bo_va->bo);
+                       radeon_fence_unref(&bo_va->last_pt_update);
                        kfree(bo_va);
                }
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
                radeon_bo_unref(&bo_va->bo);
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
        }
 
@@ -1179,9 +1240,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
        radeon_bo_unref(&vm->page_directory);
 
-       radeon_fence_unref(&vm->fence);
-       radeon_fence_unref(&vm->last_flush);
-       radeon_fence_unref(&vm->last_id_use);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               radeon_fence_unref(&vm->ids[i].flushed_updates);
+               radeon_fence_unref(&vm->ids[i].last_id_use);
+       }
 
        mutex_destroy(&vm->mutex);
 }
index 5f6db4629aaa4c04172fe092a0375a3a2a5d354f..9acb1c3c005b6ead68e940ba5443b88d3de0be0b 100644 (file)
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
        /* FIXME: implement full support */
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index 3462b64369bfe6142a4c8acfaec09e0bb7d8826c..0a2d36e8110838d059b73663e5e055052741272c 100644 (file)
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
        u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index 8a477bf1fdb31529173234f8f30a0b4e3fb9c608..c55d653aaf5f6bcfb0b80d24ad1c42025d36e267 100644 (file)
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index 7f34bad2e724d6e657203cfde55f85ddb48fa8da..acff6e09cc40b767d30582624f9187e21d7aad2e 100644 (file)
@@ -44,31 +44,27 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
                                    unsigned num_gpu_pages,
                                    struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
@@ -87,12 +83,12 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
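Note: this hunk (and the matching one in si_copy_dma() further down) replaces dynamically allocated semaphores with an on-stack struct radeon_sync. The usage pattern, with exactly the calls shown above (argument semantics are inferred from these call sites):

	struct radeon_sync sync;

	radeon_sync_create(&sync);			/* plain init; unlike radeon_semaphore_create, no error check */
	radeon_sync_resv(rdev, &sync, resv, false);	/* gather fences; the last arg appears to include shared fences */
	radeon_sync_rings(rdev, &sync, ring->idx);	/* emit waits on the target ring */
	/* ... emit copy packets, radeon_fence_emit(rdev, &fence, ring->idx) ... */
	radeon_sync_free(rdev, &sync, fence);		/* release once 'fence' signals; NULL on error paths */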
index eeea5b6a1775ee002f36682b7d092ab3b449d913..60df444bd0756fbe42b5a5dab1072f47c5c73a3a 100644 (file)
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -3362,6 +3365,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
+       unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
        u32 header;
 
        if (ib->is_const_ib) {
@@ -3397,14 +3401,13 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
-       radeon_ring_write(ring, ib->length_dw |
-                         (ib->vm ? (ib->vm->id << 24) : 0));
+       radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
 
        if (!ib->is_const_ib) {
                /* flush read cache over gart for this vmid */
                radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
                radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+               radeon_ring_write(ring, vm_id);
                radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
                radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
                                  PACKET3_TC_ACTION_ENA |
@@ -5020,27 +5023,23 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
               block, mc_id);
 }
 
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                unsigned vm_id, uint64_t pd_addr)
 {
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
-
        /* write new base address */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
 
-       if (vm->id < 8) {
+       if (vm_id < 8) {
                radeon_ring_write(ring,
-                                 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+                                 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
        } else {
                radeon_ring_write(ring,
-                                 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+                                 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* flush hdp cache */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5056,7 +5055,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
index b58f12b762d797c3c4db418712987cf0da43718d..f5cc777e1c5f142ff00b383e3863b70b47a96b3e 100644 (file)
@@ -185,20 +185,17 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
        }
 }
 
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
-       struct radeon_ring *ring = &rdev->ring[ridx];
-
-       if (vm == NULL)
-               return;
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+                    unsigned vm_id, uint64_t pd_addr)
+{
 
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-       if (vm->id < 8) {
-               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+       if (vm_id < 8) {
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
        } else {
-               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
        }
-       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+       radeon_ring_write(ring, pd_addr >> 12);
 
        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
@@ -208,7 +205,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
-       radeon_ring_write(ring, 1 << vm->id);
+       radeon_ring_write(ring, 1 << vm_id);
 }
 
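Note: both flush paths (CP in si_vm_flush() above and sDMA here) use the same register math: VMIDs 0-7 map to the VM_CONTEXT0 bank and VMIDs 8-15 to the VM_CONTEXT8 bank, four bytes apart per context, and the final >> 2 converts the byte offset into the dword register index the write packet expects. As a sketch:

	/* page-table base register for a given VMID (byte offset -> dword index) */
	u32 reg = (vm_id < 8)
		? VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)
		: VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));	/* sDMA SRBM_WRITE form */
	radeon_ring_write(ring, pd_addr >> 12);			/* directory address in 4 KiB units */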
 /**
@@ -229,31 +226,27 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
                                 unsigned num_gpu_pages,
                                 struct reservation_object *resv)
 {
-       struct radeon_semaphore *sem = NULL;
        struct radeon_fence *fence;
+       struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;
 
-       r = radeon_semaphore_create(rdev, &sem);
-       if (r) {
-               DRM_ERROR("radeon: moving bo (%d).\n", r);
-               return ERR_PTR(r);
-       }
+       radeon_sync_create(&sync);
 
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
-       radeon_semaphore_sync_resv(rdev, sem, resv, false);
-       radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+       radeon_sync_resv(rdev, &sync, resv, false);
+       radeon_sync_rings(rdev, &sync, ring->idx);
 
        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
@@ -272,12 +265,12 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
-               radeon_semaphore_free(rdev, &sem, NULL);
+               radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       radeon_semaphore_free(rdev, &sem, fence);
+       radeon_sync_free(rdev, &sync, fence);
 
        return fence;
 }
index 676e6c2ba90a3159f1c888c063837fca77647376..32e354b8b0aba6b96cfc8e94b1519b7fca4f47fb 100644 (file)
@@ -3396,6 +3396,15 @@ static int si_process_firmware_header(struct radeon_device *rdev)
 
        si_pi->mc_reg_table_start = tmp;
 
+       ret = si_read_smc_sram_dword(rdev,
+                                    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+                                    &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->fan_table_start = tmp;
+
        ret = si_read_smc_sram_dword(rdev,
                                     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
                                     SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
@@ -5817,8 +5826,33 @@ void si_dpm_setup_asic(struct radeon_device *rdev)
        si_enable_acpi_power_management(rdev);
 }
 
-static int si_set_thermal_temperature_range(struct radeon_device *rdev,
-                                       int min_temp, int max_temp)
+static int si_thermal_enable_alert(struct radeon_device *rdev,
+                                  bool enable)
+{
+       u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+       if (enable) {
+               PPSMC_Result result;
+
+               thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+               WREG32(CG_THERMAL_INT, thermal_int);
+               rdev->irq.dpm_thermal = false;
+               result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+               if (result != PPSMC_Result_OK) {
+                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+                       return -EINVAL;
+               }
+       } else {
+               thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+               WREG32(CG_THERMAL_INT, thermal_int);
+               rdev->irq.dpm_thermal = true;
+       }
+
+       return 0;
+}
+
+static int si_thermal_set_temperature_range(struct radeon_device *rdev,
+                                           int min_temp, int max_temp)
 {
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
@@ -5842,6 +5876,309 @@ static int si_set_thermal_temperature_range(struct radeon_device *rdev,
        return 0;
 }
 
+static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+       struct si_power_info *si_pi = si_get_pi(rdev);
+       u32 tmp;
+
+       if (si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+               si_pi->fan_ctrl_default_mode = tmp;
+               tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+               si_pi->t_min = tmp;
+               si_pi->fan_ctrl_is_in_default_mode = false;
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+       tmp |= TMIN(0);
+       WREG32(CG_FDO_CTRL2, tmp);
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+       tmp |= FDO_PWM_MODE(mode);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+       struct si_power_info *si_pi = si_get_pi(rdev);
+       PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+       u32 duty100;
+       u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       u16 fdo_min, slope1, slope2;
+       u32 reference_clock, tmp;
+       int ret;
+       u64 tmp64;
+
+       if (!si_pi->fan_table_start) {
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0) {
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (u16)tmp64;
+
+       t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+       t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+       pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+       pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+       slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.slope1 = cpu_to_be16(slope1);
+       fan_table.slope2 = cpu_to_be16(slope2);
+
+       fan_table.fdo_min = cpu_to_be16(fdo_min);
+
+       fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+       fan_table.hys_up = cpu_to_be16(1);
+
+       fan_table.hys_slope = cpu_to_be16(1);
+
+       fan_table.temp_resp_lim = cpu_to_be16(5);
+
+       reference_clock = radeon_get_xclk(rdev);
+
+       fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+                                               reference_clock) / 1600);
+
+       fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+       tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+       fan_table.temp_src = (uint8_t)tmp;
+
+       ret = si_copy_bytes_to_smc(rdev,
+                                  si_pi->fan_table_start,
+                                  (u8 *)(&fan_table),
+                                  sizeof(fan_table),
+                                  si_pi->sram_end);
+
+       if (ret) {
+               DRM_ERROR("Failed to load fan table to the SMC.");
+               rdev->pm.dpm.fan.ucode_fan_control = false;
+       }
+
+       return 0;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+       PPSMC_Result ret;
+
+       ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
+       if (ret == PPSMC_Result_OK)
+               return 0;
+       else
+               return -EINVAL;
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+       PPSMC_Result ret;
+
+       ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+       if (ret == PPSMC_Result_OK)
+               return 0;
+       else
+               return -EINVAL;
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+                                            u32 *speed)
+{
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+       duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)duty * 100;
+       do_div(tmp64, duty100);
+       *speed = (u32)tmp64;
+
+       if (*speed > 100)
+               *speed = 100;
+
+       return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+                                            u32 speed)
+{
+       u32 tmp;
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (speed > 100)
+               return -EINVAL;
+
+       if (rdev->pm.dpm.fan.ucode_fan_control)
+               si_fan_ctrl_stop_smc_fan_control(rdev);
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (u32)tmp64;
+
+       tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+       tmp |= FDO_STATIC_DUTY(duty);
+       WREG32(CG_FDO_CTRL0, tmp);
+
+       si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+       return 0;
+}
+
+static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+                                        u32 *speed)
+{
+       u32 tach_period;
+       u32 xclk = radeon_get_xclk(rdev);
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (rdev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+       if (tach_period == 0)
+               return -ENOENT;
+
+       *speed = 60 * xclk * 10000 / tach_period;
+
+       return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+                                        u32 speed)
+{
+       u32 tach_period, tmp;
+       u32 xclk = radeon_get_xclk(rdev);
+
+       if (rdev->pm.no_fan)
+               return -ENOENT;
+
+       if (rdev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       if ((speed < rdev->pm.fan_min_rpm) ||
+           (speed > rdev->pm.fan_max_rpm))
+               return -EINVAL;
+
+       if (rdev->pm.dpm.fan.ucode_fan_control)
+               si_fan_ctrl_stop_smc_fan_control(rdev);
+
+       tach_period = 60 * xclk * 10000 / (8 * speed);
+       tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+       tmp |= TARGET_PERIOD(tach_period);
+       WREG32(CG_TACH_CTRL, tmp);
+
+       si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+       return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+       struct si_power_info *si_pi = si_get_pi(rdev);
+       u32 tmp;
+
+       if (!si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+               tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+               WREG32(CG_FDO_CTRL2, tmp);
+
+               tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+               tmp |= TMIN(si_pi->t_min);
+               WREG32(CG_FDO_CTRL2, tmp);
+               si_pi->fan_ctrl_is_in_default_mode = true;
+       }
+}
+
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+       if (rdev->pm.dpm.fan.ucode_fan_control) {
+               si_fan_ctrl_start_smc_fan_control(rdev);
+               si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+       }
+}
+
+static void si_thermal_initialize(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       if (rdev->pm.fan_pulses_per_revolution) {
+               tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+               tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+               WREG32(CG_TACH_CTRL, tmp);
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+       tmp |= TACH_PWM_RESP_RATE(0x28);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+       int ret;
+
+       si_thermal_initialize(rdev);
+       ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(rdev, true);
+       if (ret)
+               return ret;
+       if (rdev->pm.dpm.fan.ucode_fan_control) {
+               ret = si_halt_smc(rdev);
+               if (ret)
+                       return ret;
+               ret = si_thermal_setup_fan_table(rdev);
+               if (ret)
+                       return ret;
+               ret = si_resume_smc(rdev);
+               if (ret)
+                       return ret;
+               si_thermal_start_smc_fan_control(rdev);
+       }
+
+       return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+       if (!rdev->pm.no_fan) {
+               si_fan_ctrl_set_default_mode(rdev);
+               si_fan_ctrl_stop_smc_fan_control(rdev);
+       }
+}
+
 int si_dpm_enable(struct radeon_device *rdev)
 {
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
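Note: si_thermal_setup_fan_table() above computes fixed-point slopes for the SMC fan curve; the cpu_to_be16/32 conversions indicate the SMC consumes the table big-endian. The PWM curve points appear to be in 0.01% units (the fdo_min computation divides pwm_min * duty100 by 10000), and the +50 before the /100 rounds to nearest. A worked example with illustrative numbers only:

	/* duty100 = 255, pwm_min = 3000 (30.00%), pwm_med = 5000 (50.00%),
	 * t_min = 45, t_med = 65 (degrees C) -- illustrative values only */
	pwm_diff1 = 5000 - 3000;				/* 2000 */
	t_diff1   = 65 - 45;					/* 20   */
	slope1    = (50 + (16 * 255 * 2000) / 20) / 100;	/* = 4080 */
	fdo_min   = (3000 * 255) / 10000;			/* = 76, i.e. ~30% of duty100 */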
@@ -5954,31 +6291,39 @@ int si_dpm_enable(struct radeon_device *rdev)
 
        si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
+       si_thermal_start_thermal_controller(rdev);
+
        ni_update_current_ps(rdev, boot_ps);
 
        return 0;
 }
 
-int si_dpm_late_enable(struct radeon_device *rdev)
+static int si_set_temperature_range(struct radeon_device *rdev)
 {
        int ret;
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-               PPSMC_Result result;
+       ret = si_thermal_enable_alert(rdev, false);
+       if (ret)
+               return ret;
+       ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(rdev, true);
+       if (ret)
+               return ret;
 
-               ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-               result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+       return ret;
+}
 
-               if (result != PPSMC_Result_OK)
-                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-       }
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
 
-       return 0;
+       ret = si_set_temperature_range(rdev);
+       if (ret)
+               return ret;
+
+       return ret;
 }
 
 void si_dpm_disable(struct radeon_device *rdev)
@@ -5988,6 +6333,7 @@ void si_dpm_disable(struct radeon_device *rdev)
 
        if (!si_is_smc_running(rdev))
                return;
+       si_thermal_stop_thermal_controller(rdev);
        si_disable_ulv(rdev);
        si_clear_vc(rdev);
        if (pi->thermal_protection)
@@ -6526,6 +6872,9 @@ int si_dpm_init(struct radeon_device *rdev)
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
                        rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
+       si_pi->fan_ctrl_is_in_default_mode = true;
+       rdev->pm.dpm.fan.ucode_fan_control = false;
+
        return 0;
 }
 
index 8b5c06a0832df96bfc45f876398529c1f7151c10..d16bb1b5f10f8490b64ed34030848b5732f719e8 100644 (file)
@@ -182,6 +182,7 @@ struct si_power_info {
        u32 dte_table_start;
        u32 spll_table_start;
        u32 papm_cfg_table_start;
+       u32 fan_table_start;
        /* CAC stuff */
        const struct si_cac_config_reg *cac_weights;
        const struct si_cac_config_reg *lcac_config;
@@ -197,6 +198,10 @@ struct si_power_info {
        /* SVI2 */
        u8 svd_gpio_id;
        u8 svc_gpio_id;
+       /* fan control */
+       bool fan_ctrl_is_in_default_mode;
+       u32 t_min;
+       u32 fan_ctrl_default_mode;
 };
 
 #define SISLANDS_INITIAL_STATE_ARB_INDEX    0
index 73dbc79c959d1f8093f035ac0dda87c561728429..e5bb92f16775296e5de6715daac954a78291897a 100644 (file)
@@ -135,7 +135,7 @@ void si_reset_smc(struct radeon_device *rdev)
 
 int si_program_jump_on_start(struct radeon_device *rdev)
 {
-       static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+       static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
 
        return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
 }
index 6635da9ec986f9b4eadfa50021f4f35cafe25a61..4069be89e5852852ff4e884630571908dda6e387 100644 (file)
 #define                DIG_THERM_DPM(x)                        ((x) << 14)
 #define                DIG_THERM_DPM_MASK                      0x003FC000
 #define                DIG_THERM_DPM_SHIFT                     14
-
+#define        CG_THERMAL_STATUS                               0x704
+#define                FDO_PWM_DUTY(x)                         ((x) << 9)
+#define                FDO_PWM_DUTY_MASK                       (0xff << 9)
+#define                FDO_PWM_DUTY_SHIFT                      9
 #define        CG_THERMAL_INT                                  0x708
 #define                DIG_THERM_INTH(x)                       ((x) << 8)
 #define                DIG_THERM_INTH_MASK                     0x0000FF00
 #define        THERM_INT_MASK_HIGH                     (1 << 24)
 #define        THERM_INT_MASK_LOW                      (1 << 25)
 
+#define        CG_MULT_THERMAL_CTRL                                    0x710
+#define                TEMP_SEL(x)                                     ((x) << 20)
+#define                TEMP_SEL_MASK                                   (0xff << 20)
+#define                TEMP_SEL_SHIFT                                  20
 #define        CG_MULT_THERMAL_STATUS                                  0x714
 #define                ASIC_MAX_TEMP(x)                                ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                              0x000001ff
 #define                CTF_TEMP_MASK                                   0x0003fe00
 #define                CTF_TEMP_SHIFT                                  9
 
+#define        CG_FDO_CTRL0                                    0x754
+#define                FDO_STATIC_DUTY(x)                      ((x) << 0)
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
+#define                FDO_STATIC_DUTY_SHIFT                   0
+#define        CG_FDO_CTRL1                                    0x758
+#define                FMAX_DUTY100(x)                         ((x) << 0)
+#define                FMAX_DUTY100_MASK                       0x000000FF
+#define                FMAX_DUTY100_SHIFT                      0
+#define        CG_FDO_CTRL2                                    0x75C
+#define                TMIN(x)                                 ((x) << 0)
+#define                TMIN_MASK                               0x000000FF
+#define                TMIN_SHIFT                              0
+#define                FDO_PWM_MODE(x)                         ((x) << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
+#define                FDO_PWM_MODE_SHIFT                      11
+#define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
+#define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
+#define                TACH_PWM_RESP_RATE_SHIFT                25
+
+#define CG_TACH_CTRL                                    0x770
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x774
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
 #define GENERAL_PWRMGT                                  0x780
 #       define GLOBAL_PWRMGT_EN                         (1 << 0)
 #       define STATIC_PM_EN                             (1 << 1)
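Note: the new CG_FDO_CTRL*/CG_TACH_* definitions follow the driver's usual triad of setter macro, mask and shift, and fields are updated read-modify-write, as si_fan_ctrl_set_static_mode() does above (FDO_PWM_MODE_STATIC is defined elsewhere in the driver; see its use in si_fan_ctrl_set_fan_speed_percent()):

	u32 tmp;

	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;	/* clear the field */
	tmp |= FDO_PWM_MODE(FDO_PWM_MODE_STATIC);		/* insert the new value */
	WREG32(CG_FDO_CTRL2, tmp);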
index 623a0b1e2d9dbf017b7af5218996daabc9ec2ab6..3c779838d9ab16ad13a0588b1f69ac323509d933 100644 (file)
@@ -245,6 +245,31 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
 #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
 #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
 
+struct PP_SIslands_FanTable
+{
+       uint8_t  fdo_mode;
+       uint8_t  padding;
+       int16_t  temp_min;
+       int16_t  temp_med;
+       int16_t  temp_max;
+       int16_t  slope1;
+       int16_t  slope2;
+       int16_t  fdo_min;
+       int16_t  hys_up;
+       int16_t  hys_down;
+       int16_t  hys_slope;
+       int16_t  temp_resp_lim;
+       int16_t  temp_curr;
+       int16_t  slope_curr;
+       int16_t  pwm_curr;
+       uint32_t refresh_period;
+       int16_t  fdo_max;
+       uint8_t  temp_src;
+       int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
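Note: PP_SIslands_FanTable mirrors a structure the SMC firmware reads straight out of SRAM, which is why every member is a fixed-width type and the host fills it with cpu_to_be16/32 before si_copy_bytes_to_smc(). Assuming a common ABI with no compiler padding (two bytes, thirteen int16_t, a 4-aligned uint32_t, an int16_t and two trailing bytes), the layout is 36 bytes; a defensive check one could add (a sketch, not part of this patch):

	/* e.g. at the top of si_thermal_setup_fan_table() */
	BUILD_BUG_ON(sizeof(PP_SIslands_FanTable) != 36);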
 
index 82f70c90a9ee9623991fd538bafbaddb5cc1a53c..0b0b404ff0916a9e72f241400c113b63b43d4a0f 100644 (file)
@@ -431,6 +431,31 @@ struct SMU7_Discrete_MCRegisters
 
 typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
 
+struct SMU7_Discrete_FanTable
+{
+       uint16_t FdoMode;
+       int16_t  TempMin;
+       int16_t  TempMed;
+       int16_t  TempMax;
+       int16_t  Slope1;
+       int16_t  Slope2;
+       int16_t  FdoMin;
+       int16_t  HystUp;
+       int16_t  HystDown;
+       int16_t  HystSlope;
+       int16_t  TempRespLim;
+       int16_t  TempCurr;
+       int16_t  SlopeCurr;
+       int16_t  PwmCurr;
+       uint32_t RefreshPeriod;
+       int16_t  FdoMax;
+       uint8_t  TempSrc;
+       int8_t   Padding;
+};
+
+typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
+
+
 struct SMU7_Discrete_PmFuses {
   // dw0-dw1
   uint8_t BapmVddCVidHiSidd[8];
@@ -462,7 +487,10 @@ struct SMU7_Discrete_PmFuses {
   uint8_t BapmVddCVidHiSidd2[8];
 
   // dw11-dw12
-  uint32_t Reserved6[2];
+  int16_t FuzzyFan_ErrorSetDelta;
+  int16_t FuzzyFan_ErrorRateSetDelta;
+  int16_t FuzzyFan_PwmSetDelta;
+  uint16_t CalcMeasPowerBlend;
 
   // dw13-dw16
   uint8_t GnbLPML[16];
index c96f6089f8bf195377a933a8b1632565ebe49930..2324a526de6516ecbde0b77fe6490ef4b63e0b36 100644 (file)
@@ -11,10 +11,17 @@ config DRM_RCAR_DU
          Choose this option if you have an R-Car chipset.
          If M is selected the module will be called rcar-du-drm.
 
+config DRM_RCAR_HDMI
+       bool "R-Car DU HDMI Encoder Support"
+       depends on DRM_RCAR_DU
+       depends on OF
+       help
+         Enable support for external HDMI encoders.
+
 config DRM_RCAR_LVDS
        bool "R-Car DU LVDS Encoder Support"
        depends on DRM_RCAR_DU
        depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
        help
-         Enable support the R-Car Display Unit embedded LVDS encoders
-         (currently only on R8A7790).
+         Enable support for the R-Car Display Unit embedded LVDS encoders
+         (currently only on R8A7790 and R8A7791).
index 12b8d447783538518e491d34cfca1fc87a9a3683..05de1c4097af92192bbb28ee5c7a704775a9b1db 100644 (file)
@@ -7,6 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
                 rcar_du_plane.o \
                 rcar_du_vgacon.o
 
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI)    += rcar_du_hdmicon.o \
+                                          rcar_du_hdmienc.o
 rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS)    += rcar_du_lvdsenc.o
 
 obj-$(CONFIG_DRM_RCAR_DU)              += rcar-du-drm.o
index 148b505891813f65dee9019e24741fd003c4b41c..23cc910951f430a279b6b01ae88e5f03d7de3e74 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
@@ -585,7 +586,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 
        if (irq < 0) {
                dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
-               return ret;
+               return irq;
        }
 
        ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
index e97ae502dec5ffaa50311d84f96d66b17e69ea8b..984e6083699fe380a1d6b1e9d9c4c249ee6d4167 100644 (file)
@@ -15,7 +15,6 @@
 #define __RCAR_DU_CRTC_H__
 
 #include <linux/mutex.h>
-#include <linux/platform_data/rcar-du.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -41,6 +40,15 @@ struct rcar_du_crtc {
 
 #define to_rcar_crtc(c)        container_of(c, struct rcar_du_crtc, crtc)
 
+enum rcar_du_output {
+       RCAR_DU_OUTPUT_DPAD0,
+       RCAR_DU_OUTPUT_DPAD1,
+       RCAR_DU_OUTPUT_LVDS0,
+       RCAR_DU_OUTPUT_LVDS1,
+       RCAR_DU_OUTPUT_TCON,
+       RCAR_DU_OUTPUT_MAX,
+};
+
 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
 void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
 void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
index d212efa6a49510db6b51f60a77357e5b6c5e09a9..967ae8f20233ed163540ff9117fb7a3daae2750a 100644 (file)
@@ -146,12 +146,11 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
 {
        struct platform_device *pdev = dev->platformdev;
        struct device_node *np = pdev->dev.of_node;
-       struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
        struct rcar_du_device *rcdu;
        struct resource *mem;
        int ret;
 
-       if (pdata == NULL && np == NULL) {
+       if (np == NULL) {
                dev_err(dev->dev, "no platform data\n");
                return -ENODEV;
        }
@@ -163,7 +162,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
        }
 
        rcdu->dev = &pdev->dev;
-       rcdu->pdata = pdata;
        rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
                   : (void *)platform_get_device_id(pdev)->driver_data;
        rcdu->ddev = dev;
index 8e494633c3b36325b99833dfd7ed294cab975962..0a724669f02d461351617c5044b4e7e89f7262da 100644 (file)
@@ -15,7 +15,6 @@
 #define __RCAR_DU_DRV_H__
 
 #include <linux/kernel.h>
-#include <linux/platform_data/rcar-du.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_group.h"
@@ -67,7 +66,6 @@ struct rcar_du_device_info {
 
 struct rcar_du_device {
        struct device *dev;
-       const struct rcar_du_platform_data *pdata;
        const struct rcar_du_device_info *info;
 
        void __iomem *mmio;
index 7c0ec95915eff1b1a46fb0319f5ae7eea353c26a..34a122a3966419e406cb958ce8a6b194f9fdae1f 100644 (file)
@@ -19,6 +19,8 @@
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_hdmienc.h"
 #include "rcar_du_kms.h"
 #include "rcar_du_lvdscon.h"
 #include "rcar_du_lvdsenc.h"
@@ -33,7 +35,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
 {
        struct rcar_du_connector *rcon = to_rcar_connector(connector);
 
-       return &rcon->encoder->encoder;
+       return rcar_encoder_to_drm_encoder(rcon->encoder);
 }
 
 /* -----------------------------------------------------------------------------
@@ -142,10 +144,11 @@ static const struct drm_encoder_funcs encoder_funcs = {
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_encoder_type type,
                         enum rcar_du_output output,
-                        const struct rcar_du_encoder_data *data,
-                        struct device_node *np)
+                        struct device_node *enc_node,
+                        struct device_node *con_node)
 {
        struct rcar_du_encoder *renc;
+       struct drm_encoder *encoder;
        unsigned int encoder_type;
        int ret;
 
@@ -154,6 +157,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                return -ENOMEM;
 
        renc->output = output;
+       encoder = rcar_encoder_to_drm_encoder(renc);
 
        switch (output) {
        case RCAR_DU_OUTPUT_LVDS0:
@@ -175,6 +179,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
        case RCAR_DU_ENCODER_LVDS:
                encoder_type = DRM_MODE_ENCODER_LVDS;
                break;
+       case RCAR_DU_ENCODER_HDMI:
+               encoder_type = DRM_MODE_ENCODER_TMDS;
+               break;
        case RCAR_DU_ENCODER_NONE:
        default:
                /* No external encoder, use the internal encoder type. */
@@ -182,23 +189,35 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                break;
        }
 
-       ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
-                              encoder_type);
-       if (ret < 0)
-               return ret;
+       if (type == RCAR_DU_ENCODER_HDMI) {
+               if (renc->lvds) {
+                       dev_err(rcdu->dev,
+                               "Chaining LVDS and HDMI encoders not supported\n");
+                       return -EINVAL;
+               }
 
-       drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+               ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
+               if (ret < 0)
+                       return ret;
+       } else {
+               ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+                                      encoder_type);
+               if (ret < 0)
+                       return ret;
 
-       switch (encoder_type) {
-       case DRM_MODE_ENCODER_LVDS: {
-               const struct rcar_du_panel_data *pdata =
-                       data ? &data->connector.lvds.panel : NULL;
-               return rcar_du_lvds_connector_init(rcdu, renc, pdata, np);
+               drm_encoder_helper_add(encoder, &encoder_helper_funcs);
        }
 
+       switch (encoder_type) {
+       case DRM_MODE_ENCODER_LVDS:
+               return rcar_du_lvds_connector_init(rcdu, renc, con_node);
+
        case DRM_MODE_ENCODER_DAC:
                return rcar_du_vga_connector_init(rcdu, renc);
 
+       case DRM_MODE_ENCODER_TMDS:
+               return rcar_du_hdmi_connector_init(rcdu, renc);
+
        default:
                return -EINVAL;
        }
index bd624135ef1fbfed3257bc4377b1e17a441b2021..719b6f2a031ccf0756ebd718307196b23d3f22b6 100644 (file)
 #ifndef __RCAR_DU_ENCODER_H__
 #define __RCAR_DU_ENCODER_H__
 
-#include <linux/platform_data/rcar-du.h>
-
 #include <drm/drm_crtc.h>
+#include <drm/drm_encoder_slave.h>
 
 struct rcar_du_device;
+struct rcar_du_hdmienc;
 struct rcar_du_lvdsenc;
 
+enum rcar_du_encoder_type {
+       RCAR_DU_ENCODER_UNUSED = 0,
+       RCAR_DU_ENCODER_NONE,
+       RCAR_DU_ENCODER_VGA,
+       RCAR_DU_ENCODER_LVDS,
+       RCAR_DU_ENCODER_HDMI,
+};
+
 struct rcar_du_encoder {
-       struct drm_encoder encoder;
+       struct drm_encoder_slave slave;
        enum rcar_du_output output;
+       struct rcar_du_hdmienc *hdmi;
        struct rcar_du_lvdsenc *lvds;
 };
 
 #define to_rcar_encoder(e) \
-       container_of(e, struct rcar_du_encoder, encoder)
+       container_of(e, struct rcar_du_encoder, slave.base)
+
+#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
 
 struct rcar_du_connector {
        struct drm_connector connector;
@@ -44,7 +55,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector);
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_encoder_type type,
                         enum rcar_du_output output,
-                        const struct rcar_du_encoder_data *data,
-                        struct device_node *np);
+                        struct device_node *enc_node,
+                        struct device_node *con_node);
 
 #endif /* __RCAR_DU_ENCODER_H__ */
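A note on the structural change above: struct rcar_du_encoder now embeds a
struct drm_encoder_slave rather than a bare struct drm_encoder, so
to_rcar_encoder() recovers the wrapper from the embedded slave.base and
rcar_encoder_to_drm_encoder() goes the other way. A minimal userspace sketch
of that container_of() round trip, with mock types standing in for the DRM
structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Same layout trick as the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encoder { int id; };                    /* mock drm_encoder */
    struct encoder_slave { struct encoder base; }; /* mock drm_encoder_slave */
    struct du_encoder {                            /* mock rcar_du_encoder */
            struct encoder_slave slave;
            int output;
    };

    int main(void)
    {
            struct du_encoder renc = { .output = 2 };
            /* rcar_encoder_to_drm_encoder(): wrapper -> embedded base */
            struct encoder *e = &renc.slave.base;
            /* to_rcar_encoder(): embedded base -> wrapper */
            struct du_encoder *back =
                    container_of(e, struct du_encoder, slave.base);
            printf("output = %d\n", back->output); /* prints 2 */
            return 0;
    }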
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
new file mode 100644 (file)
index 0000000..4d7d4dd
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_kms.h"
+
+#define to_slave_funcs(e)      (to_rcar_encoder(e)->slave.slave_funcs)
+
+static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+       struct rcar_du_connector *con = to_rcar_connector(connector);
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (sfuncs->get_modes == NULL)
+               return 0;
+
+       return sfuncs->get_modes(encoder, connector);
+}
+
+static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
+                                            struct drm_display_mode *mode)
+{
+       struct rcar_du_connector *con = to_rcar_connector(connector);
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (sfuncs->mode_valid == NULL)
+               return MODE_OK;
+
+       return sfuncs->mode_valid(encoder, mode);
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+       .get_modes = rcar_du_hdmi_connector_get_modes,
+       .mode_valid = rcar_du_hdmi_connector_mode_valid,
+       .best_encoder = rcar_du_connector_best_encoder,
+};
+
+static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct rcar_du_connector *con = to_rcar_connector(connector);
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (sfuncs->detect == NULL)
+               return connector_status_unknown;
+
+       return sfuncs->detect(encoder, connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = rcar_du_hdmi_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = rcar_du_hdmi_connector_destroy,
+};
+
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+                               struct rcar_du_encoder *renc)
+{
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+       struct rcar_du_connector *rcon;
+       struct drm_connector *connector;
+       int ret;
+
+       rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+       if (rcon == NULL)
+               return -ENOMEM;
+
+       connector = &rcon->connector;
+       connector->display_info.width_mm = 0;
+       connector->display_info.height_mm = 0;
+
+       ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
+                                DRM_MODE_CONNECTOR_HDMIA);
+       if (ret < 0)
+               return ret;
+
+       drm_connector_helper_add(connector, &connector_helper_funcs);
+       ret = drm_connector_register(connector);
+       if (ret < 0)
+               return ret;
+
+       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+       drm_object_property_set_value(&connector->base,
+               rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       if (ret < 0)
+               return ret;
+
+       connector->encoder = encoder;
+       rcon->encoder = renc;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
new file mode 100644 (file)
index 0000000..87daa94
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMICON_H__
+#define __RCAR_DU_HDMICON_H__
+
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+                               struct rcar_du_encoder *renc);
+#else
+static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+                                             struct rcar_du_encoder *renc)
+{
+       return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
new file mode 100644 (file)
index 0000000..359bc99
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmienc.h"
+
+struct rcar_du_hdmienc {
+       struct rcar_du_encoder *renc;
+       struct device *dev;
+       int dpms;
+};
+
+#define to_rcar_hdmienc(e)     (to_rcar_encoder(e)->hdmi)
+#define to_slave_funcs(e)      (to_rcar_encoder(e)->slave.slave_funcs)
+
+static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (hdmienc->dpms == mode)
+               return;
+
+       if (sfuncs->dpms)
+               sfuncs->dpms(encoder, mode);
+
+       hdmienc->dpms = mode;
+}
+
+static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (sfuncs->mode_fixup == NULL)
+               return true;
+
+       return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
+{
+       rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
+{
+       rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+       struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+       if (sfuncs->mode_set)
+               sfuncs->mode_set(encoder, mode, adjusted_mode);
+
+       rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+       .dpms = rcar_du_hdmienc_dpms,
+       .mode_fixup = rcar_du_hdmienc_mode_fixup,
+       .prepare = rcar_du_hdmienc_mode_prepare,
+       .commit = rcar_du_hdmienc_mode_commit,
+       .mode_set = rcar_du_hdmienc_mode_set,
+};
+
+static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
+{
+       struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+
+       rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       drm_encoder_cleanup(encoder);
+       put_device(hdmienc->dev);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+       .destroy = rcar_du_hdmienc_cleanup,
+};
+
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+                        struct rcar_du_encoder *renc, struct device_node *np)
+{
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+       struct drm_i2c_encoder_driver *driver;
+       struct i2c_client *i2c_slave;
+       struct rcar_du_hdmienc *hdmienc;
+       int ret;
+
+       hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
+       if (hdmienc == NULL)
+               return -ENOMEM;
+
+       /* Locate the slave I2C device and driver. */
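+       /* If the client or its driver data is missing, the external HDMI
+        * transmitter has not bound yet; returning -EPROBE_DEFER below lets
+        * the driver core retry this initialization later. */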
+       i2c_slave = of_find_i2c_device_by_node(np);
+       if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
+               return -EPROBE_DEFER;
+
+       hdmienc->dev = &i2c_slave->dev;
+
+       if (hdmienc->dev->driver == NULL) {
+               ret = -EPROBE_DEFER;
+               goto error;
+       }
+
+       /* Initialize the slave encoder. */
+       driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
+       ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
+       if (ret < 0)
+               goto error;
+
+       ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+                              DRM_MODE_ENCODER_TMDS);
+       if (ret < 0)
+               goto error;
+
+       drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+       renc->hdmi = hdmienc;
+       hdmienc->renc = renc;
+
+       return 0;
+
+error:
+       put_device(hdmienc->dev);
+       return ret;
+}
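For context, encoder_init() above is part of the drm_encoder_slave
infrastructure: the I2C transmitter driver is expected to fill in
slave->slave_funcs, which the DU code then reaches through the
to_slave_funcs() macros in its connector and encoder callbacks. A sketch of
the slave driver's side (the foo_* names are illustrative, not part of this
series):

    /* Hypothetical drm_i2c_encoder_driver for an ADV7511-class chip. */
    static struct drm_encoder_slave_funcs foo_slave_funcs = {
            .dpms      = foo_dpms,      /* reached via rcar_du_hdmienc_dpms() */
            .mode_set  = foo_mode_set,
            .detect    = foo_detect,    /* reached via the HDMI connector */
            .get_modes = foo_get_modes,
    };

    static int foo_encoder_init(struct i2c_client *client,
                                struct drm_device *dev,
                                struct drm_encoder_slave *slave)
    {
            slave->slave_funcs = &foo_slave_funcs;
            return 0;
    }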
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
new file mode 100644 (file)
index 0000000..2ff0128
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMIENC_H__
+#define __RCAR_DU_HDMIENC_H__
+
+#include <linux/module.h>
+
+struct device_node;
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+                        struct rcar_du_encoder *renc, struct device_node *np);
+#else
+static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+                                      struct rcar_du_encoder *renc,
+                                      struct device_node *np)
+{
+       return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 6c24ad7d03ef300077cc992b9d3b95fccec2cac1..0c5ee616b5a3aabd12234ee7f208dc277a99d73f 100644 (file)
@@ -126,9 +126,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
        else
                align = 16 * args->bpp / 8;
 
-       args->pitch = roundup(max(args->pitch, min_pitch), align);
+       args->pitch = roundup(min_pitch, align);
 
-       return drm_gem_cma_dumb_create(file, dev, args);
+       return drm_gem_cma_dumb_create_internal(file, dev, args);
 }
 
 static struct drm_framebuffer *
@@ -190,49 +190,16 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
        .output_poll_changed = rcar_du_output_poll_changed,
 };
 
-static int rcar_du_encoders_init_pdata(struct rcar_du_device *rcdu)
-{
-       unsigned int num_encoders = 0;
-       unsigned int i;
-       int ret;
-
-       for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
-               const struct rcar_du_encoder_data *pdata =
-                       &rcdu->pdata->encoders[i];
-               const struct rcar_du_output_routing *route =
-                       &rcdu->info->routes[pdata->output];
-
-               if (pdata->type == RCAR_DU_ENCODER_UNUSED)
-                       continue;
-
-               if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
-                   route->possible_crtcs == 0) {
-                       dev_warn(rcdu->dev,
-                                "encoder %u references unexisting output %u, skipping\n",
-                                i, pdata->output);
-                       continue;
-               }
-
-               ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
-                                          pdata, NULL);
-               if (ret < 0)
-                       return ret;
-
-               num_encoders++;
-       }
-
-       return num_encoders;
-}
-
-static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
-                                       enum rcar_du_output output,
-                                       struct of_endpoint *ep)
+static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
+                                    enum rcar_du_output output,
+                                    struct of_endpoint *ep)
 {
        static const struct {
                const char *compatible;
                enum rcar_du_encoder_type type;
        } encoders[] = {
                { "adi,adv7123", RCAR_DU_ENCODER_VGA },
+               { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
                { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
        };
 
@@ -323,14 +290,14 @@ static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
                connector = entity;
        }
 
-       ret = rcar_du_encoder_init(rcdu, enc_type, output, NULL, connector);
+       ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
        of_node_put(encoder);
        of_node_put(connector);
 
        return ret < 0 ? ret : 1;
 }
 
-static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
+static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
 {
        struct device_node *np = rcdu->dev->of_node;
        struct device_node *prev = NULL;
@@ -377,7 +344,7 @@ static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
                }
 
                /* Process the output pipeline. */
-               ret = rcar_du_encoders_init_dt_one(rcdu, output, &ep);
+               ret = rcar_du_encoders_init_one(rcdu, output, &ep);
                if (ret < 0) {
                        of_node_put(ep_node);
                        return ret;
@@ -442,11 +409,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
        if (ret < 0)
                return ret;
 
-       if (rcdu->pdata)
-               ret = rcar_du_encoders_init_pdata(rcdu);
-       else
-               ret = rcar_du_encoders_init_dt(rcdu);
-
+       ret = rcar_du_encoders_init(rcdu);
        if (ret < 0)
                return ret;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 115eed20db12abc421eddf7a20db0707c291c4a5..6d9811c052c4bce4d41ea61232fd99aaa71d6aa9 100644 (file)
 struct rcar_du_lvds_connector {
        struct rcar_du_connector connector;
 
-       struct rcar_du_panel_data panel;
+       struct {
+               unsigned int width_mm;          /* Panel width in mm */
+               unsigned int height_mm;         /* Panel height in mm */
+               struct videomode mode;
+       } panel;
 };
 
 #define to_rcar_lvds_connector(c) \
@@ -78,31 +82,26 @@ static const struct drm_connector_funcs connector_funcs = {
 
 int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
                                struct rcar_du_encoder *renc,
-                               const struct rcar_du_panel_data *panel,
                                /* TODO const */ struct device_node *np)
 {
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
        struct rcar_du_lvds_connector *lvdscon;
        struct drm_connector *connector;
+       struct display_timing timing;
        int ret;
 
        lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
        if (lvdscon == NULL)
                return -ENOMEM;
 
-       if (panel) {
-               lvdscon->panel = *panel;
-       } else {
-               struct display_timing timing;
-
-               ret = of_get_display_timing(np, "panel-timing", &timing);
-               if (ret < 0)
-                       return ret;
+       ret = of_get_display_timing(np, "panel-timing", &timing);
+       if (ret < 0)
+               return ret;
 
-               videomode_from_timing(&timing, &lvdscon->panel.mode);
+       videomode_from_timing(&timing, &lvdscon->panel.mode);
 
-               of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
-               of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
-       }
+       of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
+       of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
 
        connector = &lvdscon->connector.connector;
        connector->display_info.width_mm = lvdscon->panel.width_mm;
@@ -122,11 +121,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
        drm_object_property_set_value(&connector->base,
                rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
-       ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+       ret = drm_mode_connector_attach_encoder(connector, encoder);
        if (ret < 0)
                return ret;
 
-       connector->encoder = &renc->encoder;
+       connector->encoder = encoder;
        lvdscon->connector.encoder = renc;
 
        return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d11424d537f90334fe30e9dd7ddd8faf430d9790..d4881ee0be7e8aa4df09f5469717592ec0e8c627 100644 (file)
 
 struct rcar_du_device;
 struct rcar_du_encoder;
-struct rcar_du_panel_data;
 
 int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
                                struct rcar_du_encoder *renc,
-                               const struct rcar_du_panel_data *panel,
                                struct device_node *np);
 
 #endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 3303a55cec79dcfb28b4dff7e53d6ac5e4c1fb2b..f65aabda0796bbc62aa9af0326d2dccafd187067 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/platform_data/rcar-du.h>
 
 struct rcar_drm_crtc;
 struct rcar_du_lvdsenc;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 564a723ede0374e5def036e085b009bef4b1a954..752747a5e920e847ac689aa78555da1c12716b9e 100644 (file)
@@ -52,6 +52,7 @@ static const struct drm_connector_funcs connector_funcs = {
 int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
                               struct rcar_du_encoder *renc)
 {
+       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
        struct rcar_du_connector *rcon;
        struct drm_connector *connector;
        int ret;
@@ -78,11 +79,11 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
        drm_object_property_set_value(&connector->base,
                rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
-       ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+       ret = drm_mode_connector_attach_encoder(connector, encoder);
        if (ret < 0)
                return ret;
 
-       connector->encoder = &renc->encoder;
+       connector->encoder = encoder;
        rcon->encoder = renc;
 
        return 0;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0ddce4d046d977549650c62de63e559cb999e4ec..859ccb658601e9ca476bbc6e1c5db50c4bbc8160 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include <video/sh_mobile_meram.h>
 
index d2ae0c0e13be6695a109bdea7ea107a35c8654f3..36a1ad3c482319e288c539bcbf7dfbe27a72074d 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "sti_compositor.h"
 #include "sti_drm_drv.h"
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 354ddb29231f26e67e6ac9ec3a638358b47923c4..74d9d621453da3f341227933840fb0cfe41f906e 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_TEGRA
        tristate "NVIDIA Tegra DRM"
        depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+       depends on COMMON_CLK
        depends on DRM
        depends on RESET_CONTROLLER
        select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 6553fd238685e459deb0676114bfd9cac04f7e50..3367960286a6f162455e0363a54dace2c6882883 100644 (file)
@@ -9,17 +9,23 @@
 
 #include <linux/clk.h>
 #include <linux/debugfs.h>
+#include <linux/iommu.h>
 #include <linux/reset.h>
 
+#include <soc/tegra/pmc.h>
+
 #include "dc.h"
 #include "drm.h"
 #include "gem.h"
 
+#include <drm/drm_plane_helper.h>
+
 struct tegra_dc_soc_info {
        bool supports_interlacing;
        bool supports_cursor;
        bool supports_block_linear;
        unsigned int pitch_align;
+       bool has_powergate;
 };
 
 struct tegra_plane {
@@ -32,6 +38,26 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
        return container_of(plane, struct tegra_plane, base);
 }
 
+static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index)
+{
+       u32 value = WIN_A_ACT_REQ << index;
+
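+       /*
+        * In DC_CMD_STATE_CONTROL each *_UPDATE bit sits eight positions
+        * above its *_ACT_REQ counterpart (compare the WIN_A_UPDATE /
+        * WIN_A_ACT_REQ pair removed below): the first write latches the
+        * double-buffered window registers, the second arms the update.
+        */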
+       tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_cursor_commit(struct tegra_dc *dc)
+{
+       tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_commit(struct tegra_dc *dc)
+{
+       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
 static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
 {
        /* assume no swapping of fetched data */
@@ -303,17 +329,260 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
                break;
        }
 
-       tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+       tegra_dc_window_commit(dc, index);
+
+       return 0;
+}
+
+static int tegra_window_plane_disable(struct drm_plane *plane)
+{
+       struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+       struct tegra_plane *p = to_tegra_plane(plane);
+       u32 value;
+
+       if (!plane->crtc)
+               return 0;
+
+       value = WINDOW_A_SELECT << p->index;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+       value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+       value &= ~WIN_ENABLE;
+       tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+       tegra_dc_window_commit(dc, p->index);
+
+       return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+       struct tegra_plane *p = to_tegra_plane(plane);
+
+       drm_plane_cleanup(plane);
+       kfree(p);
+}
+
+static const u32 tegra_primary_plane_formats[] = {
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_RGB565,
+};
+
+static int tegra_primary_plane_update(struct drm_plane *plane,
+                                     struct drm_crtc *crtc,
+                                     struct drm_framebuffer *fb, int crtc_x,
+                                     int crtc_y, unsigned int crtc_w,
+                                     unsigned int crtc_h, uint32_t src_x,
+                                     uint32_t src_y, uint32_t src_w,
+                                     uint32_t src_h)
+{
+       struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+       struct tegra_plane *p = to_tegra_plane(plane);
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       struct tegra_dc_window window;
+       int err;
+
+       memset(&window, 0, sizeof(window));
+       window.src.x = src_x >> 16;
+       window.src.y = src_y >> 16;
+       window.src.w = src_w >> 16;
+       window.src.h = src_h >> 16;
+       window.dst.x = crtc_x;
+       window.dst.y = crtc_y;
+       window.dst.w = crtc_w;
+       window.dst.h = crtc_h;
+       window.format = tegra_dc_format(fb->pixel_format, &window.swap);
+       window.bits_per_pixel = fb->bits_per_pixel;
+       window.bottom_up = tegra_fb_is_bottom_up(fb);
+
+       err = tegra_fb_get_tiling(fb, &window.tiling);
+       if (err < 0)
+               return err;
+
+       window.base[0] = bo->paddr + fb->offsets[0];
+       window.stride[0] = fb->pitches[0];
+
+       err = tegra_dc_setup_window(dc, p->index, &window);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void tegra_primary_plane_destroy(struct drm_plane *plane)
+{
+       tegra_window_plane_disable(plane);
+       tegra_plane_destroy(plane);
+}
+
+static const struct drm_plane_funcs tegra_primary_plane_funcs = {
+       .update_plane = tegra_primary_plane_update,
+       .disable_plane = tegra_window_plane_disable,
+       .destroy = tegra_primary_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
+                                                      struct tegra_dc *dc)
+{
+       struct tegra_plane *plane;
+       unsigned int num_formats;
+       const u32 *formats;
+       int err;
+
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
+       formats = tegra_primary_plane_formats;
+
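+       /* 1 << dc->pipe is the possible_crtcs mask: the primary plane is
+        * usable only on the CRTC driven by this display controller. */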
+       err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+                                      &tegra_primary_plane_funcs, formats,
+                                      num_formats, DRM_PLANE_TYPE_PRIMARY);
+       if (err < 0) {
+               kfree(plane);
+               return ERR_PTR(err);
+       }
+
+       return &plane->base;
+}
+
+static const u32 tegra_cursor_plane_formats[] = {
+       DRM_FORMAT_RGBA8888,
+};
+
+static int tegra_cursor_plane_update(struct drm_plane *plane,
+                                    struct drm_crtc *crtc,
+                                    struct drm_framebuffer *fb, int crtc_x,
+                                    int crtc_y, unsigned int crtc_w,
+                                    unsigned int crtc_h, uint32_t src_x,
+                                    uint32_t src_y, uint32_t src_w,
+                                    uint32_t src_h)
+{
+       struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       u32 value = CURSOR_CLIP_DISPLAY;
+
+       /* scaling not supported for cursor */
+       if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h))
+               return -EINVAL;
+
+       /* only square cursors supported */
+       if (src_w != src_h)
+               return -EINVAL;
+
+       switch (crtc_w) {
+       case 32:
+               value |= CURSOR_SIZE_32x32;
+               break;
+
+       case 64:
+               value |= CURSOR_SIZE_64x64;
+               break;
+
+       case 128:
+               value |= CURSOR_SIZE_128x128;
+               break;
+
+       case 256:
+               value |= CURSOR_SIZE_256x256;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
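+       /* Only bits [31:10] of the buffer bus address are programmed, so
+        * the cursor BO is assumed to be at least 1 KiB aligned; the size
+        * bits set above live in the same register. */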
+       value |= (bo->paddr >> 10) & 0x3fffff;
+       tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       value = (bo->paddr >> 32) & 0x3;
+       tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+       /* enable cursor and set blend mode */
+       value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+       value |= CURSOR_ENABLE;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+       value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+       value &= ~CURSOR_DST_BLEND_MASK;
+       value &= ~CURSOR_SRC_BLEND_MASK;
+       value |= CURSOR_MODE_NORMAL;
+       value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+       value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+       value |= CURSOR_ALPHA;
+       tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+
+       /* position the cursor */
+       value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff);
+       tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+
+       /* apply changes */
+       tegra_dc_cursor_commit(dc);
+       tegra_dc_commit(dc);
+
+       return 0;
+}
+
+static int tegra_cursor_plane_disable(struct drm_plane *plane)
+{
+       struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+       u32 value;
+
+       if (!plane->crtc)
+               return 0;
+
+       value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+       value &= ~CURSOR_ENABLE;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+       tegra_dc_cursor_commit(dc);
+       tegra_dc_commit(dc);
 
        return 0;
 }
 
-static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                             struct drm_framebuffer *fb, int crtc_x,
-                             int crtc_y, unsigned int crtc_w,
-                             unsigned int crtc_h, uint32_t src_x,
-                             uint32_t src_y, uint32_t src_w, uint32_t src_h)
+static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
+       .update_plane = tegra_cursor_plane_update,
+       .disable_plane = tegra_cursor_plane_disable,
+       .destroy = tegra_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+                                                     struct tegra_dc *dc)
+{
+       struct tegra_plane *plane;
+       unsigned int num_formats;
+       const u32 *formats;
+       int err;
+
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+       formats = tegra_cursor_plane_formats;
+
+       err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+                                      &tegra_cursor_plane_funcs, formats,
+                                      num_formats, DRM_PLANE_TYPE_CURSOR);
+       if (err < 0) {
+               kfree(plane);
+               return ERR_PTR(err);
+       }
+
+       return &plane->base;
+}
+
+static int tegra_overlay_plane_update(struct drm_plane *plane,
+                                     struct drm_crtc *crtc,
+                                     struct drm_framebuffer *fb, int crtc_x,
+                                     int crtc_y, unsigned int crtc_w,
+                                     unsigned int crtc_h, uint32_t src_x,
+                                     uint32_t src_y, uint32_t src_w,
+                                     uint32_t src_h)
 {
        struct tegra_plane *p = to_tegra_plane(plane);
        struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -359,44 +628,19 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        return tegra_dc_setup_window(dc, p->index, &window);
 }
 
-static int tegra_plane_disable(struct drm_plane *plane)
+static void tegra_overlay_plane_destroy(struct drm_plane *plane)
 {
-       struct tegra_dc *dc = to_tegra_dc(plane->crtc);
-       struct tegra_plane *p = to_tegra_plane(plane);
-       unsigned long value;
-
-       if (!plane->crtc)
-               return 0;
-
-       value = WINDOW_A_SELECT << p->index;
-       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
-       value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
-       value &= ~WIN_ENABLE;
-       tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
-       tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
-
-       return 0;
-}
-
-static void tegra_plane_destroy(struct drm_plane *plane)
-{
-       struct tegra_plane *p = to_tegra_plane(plane);
-
-       tegra_plane_disable(plane);
-       drm_plane_cleanup(plane);
-       kfree(p);
+       tegra_window_plane_disable(plane);
+       tegra_plane_destroy(plane);
 }
 
-static const struct drm_plane_funcs tegra_plane_funcs = {
-       .update_plane = tegra_plane_update,
-       .disable_plane = tegra_plane_disable,
-       .destroy = tegra_plane_destroy,
+static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
+       .update_plane = tegra_overlay_plane_update,
+       .disable_plane = tegra_window_plane_disable,
+       .destroy = tegra_overlay_plane_destroy,
 };
 
-static const uint32_t plane_formats[] = {
+static const uint32_t tegra_overlay_plane_formats[] = {
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_RGB565,
@@ -406,27 +650,44 @@ static const uint32_t plane_formats[] = {
        DRM_FORMAT_YUV422,
 };
 
-static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
+                                                      struct tegra_dc *dc,
+                                                      unsigned int index)
 {
-       unsigned int i;
-       int err = 0;
+       struct tegra_plane *plane;
+       unsigned int num_formats;
+       const u32 *formats;
+       int err;
 
-       for (i = 0; i < 2; i++) {
-               struct tegra_plane *plane;
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
 
-               plane = kzalloc(sizeof(*plane), GFP_KERNEL);
-               if (!plane)
-                       return -ENOMEM;
+       plane->index = index;
 
-               plane->index = 1 + i;
+       num_formats = ARRAY_SIZE(tegra_overlay_plane_formats);
+       formats = tegra_overlay_plane_formats;
 
-               err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
-                                    &tegra_plane_funcs, plane_formats,
-                                    ARRAY_SIZE(plane_formats), false);
-               if (err < 0) {
-                       kfree(plane);
-                       return err;
-               }
+       err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+                                      &tegra_overlay_plane_funcs, formats,
+                                      num_formats, DRM_PLANE_TYPE_OVERLAY);
+       if (err < 0) {
+               kfree(plane);
+               return ERR_PTR(err);
+       }
+
+       return &plane->base;
+}
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+       struct drm_plane *plane;
+       unsigned int i;
+
+       for (i = 0; i < 2; i++) {
+               plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
+               if (IS_ERR(plane))
+                       return PTR_ERR(plane);
        }
 
        return 0;
@@ -513,10 +774,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
        tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
        tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
-       value = GENERAL_UPDATE | WIN_A_UPDATE;
-       tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
        value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+       tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
        tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
        return 0;
@@ -548,109 +807,6 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
        spin_unlock_irqrestore(&dc->lock, flags);
 }
 
-static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
-                               uint32_t handle, uint32_t width,
-                               uint32_t height, int32_t hot_x, int32_t hot_y)
-{
-       unsigned long value = CURSOR_CLIP_DISPLAY;
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-       struct drm_gem_object *gem;
-       struct tegra_bo *bo = NULL;
-
-       if (!dc->soc->supports_cursor)
-               return -ENXIO;
-
-       if (width != height)
-               return -EINVAL;
-
-       switch (width) {
-       case 32:
-               value |= CURSOR_SIZE_32x32;
-               break;
-
-       case 64:
-               value |= CURSOR_SIZE_64x64;
-               break;
-
-       case 128:
-               value |= CURSOR_SIZE_128x128;
-
-       case 256:
-               value |= CURSOR_SIZE_256x256;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if (handle) {
-               gem = drm_gem_object_lookup(crtc->dev, file, handle);
-               if (!gem)
-                       return -ENOENT;
-
-               bo = to_tegra_bo(gem);
-       }
-
-       if (bo) {
-               unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
-#endif
-
-               tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
-#endif
-
-               value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-               value |= CURSOR_ENABLE;
-               tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-               value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
-               value &= ~CURSOR_DST_BLEND_MASK;
-               value &= ~CURSOR_SRC_BLEND_MASK;
-               value |= CURSOR_MODE_NORMAL;
-               value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
-               value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
-               value |= CURSOR_ALPHA;
-               tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
-       } else {
-               value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-               value &= ~CURSOR_ENABLE;
-               tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-       }
-
-       tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
-       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
-       return 0;
-}
-
-static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-       unsigned long value;
-
-       if (!dc->soc->supports_cursor)
-               return -ENXIO;
-
-       value = ((y & 0x3fff) << 16) | (x & 0x3fff);
-       tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
-       tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
-       /* XXX: only required on generations earlier than Tegra124? */
-       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
-       return 0;
-}
-
 static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 {
        struct drm_device *drm = dc->base.dev;
@@ -727,8 +883,6 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs tegra_crtc_funcs = {
-       .cursor_set2 = tegra_dc_cursor_set2,
-       .cursor_move = tegra_dc_cursor_move,
        .page_flip = tegra_dc_page_flip,
        .set_config = drm_crtc_helper_set_config,
        .destroy = tegra_dc_destroy,
@@ -742,7 +896,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
 
        drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
                if (plane->crtc == crtc) {
-                       tegra_plane_disable(plane);
+                       tegra_window_plane_disable(plane);
                        plane->crtc = NULL;
 
                        if (plane->fb) {
@@ -752,7 +906,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
                }
        }
 
-       drm_vblank_off(drm, dc->pipe);
+       drm_crtc_vblank_off(crtc);
+       tegra_dc_commit(dc);
 }
 
 static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -841,8 +996,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
        u32 value;
        int err;
 
-       drm_vblank_pre_modeset(crtc->dev, dc->pipe);
-
        err = tegra_crtc_setup_clk(crtc, mode);
        if (err) {
                dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -896,6 +1049,8 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
        unsigned int syncpt;
        unsigned long value;
 
+       drm_crtc_vblank_off(crtc);
+
        /* hardware initialization */
        reset_control_deassert(dc->rst);
        usleep_range(10000, 20000);
@@ -935,15 +1090,9 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
 static void tegra_crtc_commit(struct drm_crtc *crtc)
 {
        struct tegra_dc *dc = to_tegra_dc(crtc);
-       unsigned long value;
-
-       value = GENERAL_UPDATE | WIN_A_UPDATE;
-       tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
-       value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
-       tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
-       drm_vblank_post_modeset(crtc->dev, dc->pipe);
+       drm_crtc_vblank_on(crtc);
+       tegra_dc_commit(dc);
 }
 
 static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -997,7 +1146,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
        struct tegra_dc *dc = node->info_ent->data;
 
 #define DUMP_REG(name)                                         \
-       seq_printf(s, "%-40s %#05x %08lx\n", #name, name,       \
+       seq_printf(s, "%-40s %#05x %08x\n", #name, name,        \
                   tegra_dc_readl(dc, name))
 
        DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
@@ -1285,9 +1434,40 @@ static int tegra_dc_init(struct host1x_client *client)
        struct drm_device *drm = dev_get_drvdata(client->parent);
        struct tegra_dc *dc = host1x_client_to_dc(client);
        struct tegra_drm *tegra = drm->dev_private;
+       struct drm_plane *primary = NULL;
+       struct drm_plane *cursor = NULL;
        int err;
 
-       drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+       if (tegra->domain) {
+               err = iommu_attach_device(tegra->domain, dc->dev);
+               if (err < 0) {
+                       dev_err(dc->dev, "failed to attach to domain: %d\n",
+                               err);
+                       return err;
+               }
+
+               dc->domain = tegra->domain;
+       }
+
+       primary = tegra_dc_primary_plane_create(drm, dc);
+       if (IS_ERR(primary)) {
+               err = PTR_ERR(primary);
+               goto cleanup;
+       }
+
+       if (dc->soc->supports_cursor) {
+               cursor = tegra_dc_cursor_plane_create(drm, dc);
+               if (IS_ERR(cursor)) {
+                       err = PTR_ERR(cursor);
+                       goto cleanup;
+               }
+       }
+
+       err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
+                                       &tegra_crtc_funcs);
+       if (err < 0)
+               goto cleanup;
+
        drm_mode_crtc_set_gamma_size(&dc->base, 256);
        drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
@@ -1301,12 +1481,12 @@ static int tegra_dc_init(struct host1x_client *client)
        err = tegra_dc_rgb_init(drm, dc);
        if (err < 0 && err != -ENODEV) {
                dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
-               return err;
+               goto cleanup;
        }
 
        err = tegra_dc_add_planes(drm, dc);
        if (err < 0)
-               return err;
+               goto cleanup;
 
        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
                err = tegra_dc_debugfs_init(dc, drm->primary);
@@ -1319,10 +1499,24 @@ static int tegra_dc_init(struct host1x_client *client)
        if (err < 0) {
                dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
                        err);
-               return err;
+               goto cleanup;
        }
 
        return 0;
+
+cleanup:
+       if (cursor)
+               drm_plane_cleanup(cursor);
+
+       if (primary)
+               drm_plane_cleanup(primary);
+
+       if (tegra->domain) {
+               iommu_detach_device(tegra->domain, dc->dev);
+               dc->domain = NULL;
+       }
+
+       return err;
 }
 
 static int tegra_dc_exit(struct host1x_client *client)
@@ -1344,6 +1538,11 @@ static int tegra_dc_exit(struct host1x_client *client)
                return err;
        }
 
+       if (dc->domain) {
+               iommu_detach_device(dc->domain, dc->dev);
+               dc->domain = NULL;
+       }
+
        return 0;
 }
 
@@ -1357,6 +1556,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
        .supports_cursor = false,
        .supports_block_linear = false,
        .pitch_align = 8,
+       .has_powergate = false,
 };
 
 static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1364,6 +1564,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
        .supports_cursor = false,
        .supports_block_linear = false,
        .pitch_align = 8,
+       .has_powergate = false,
 };
 
 static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1371,6 +1572,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
        .supports_cursor = false,
        .supports_block_linear = false,
        .pitch_align = 64,
+       .has_powergate = true,
 };
 
 static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -1378,12 +1580,16 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
        .supports_cursor = true,
        .supports_block_linear = true,
        .pitch_align = 64,
+       .has_powergate = true,
 };
 
 static const struct of_device_id tegra_dc_of_match[] = {
        {
                .compatible = "nvidia,tegra124-dc",
                .data = &tegra124_dc_soc_info,
+       }, {
+               .compatible = "nvidia,tegra114-dc",
+               .data = &tegra114_dc_soc_info,
        }, {
                .compatible = "nvidia,tegra30-dc",
                .data = &tegra30_dc_soc_info,
@@ -1467,9 +1673,34 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return PTR_ERR(dc->rst);
        }
 
-       err = clk_prepare_enable(dc->clk);
-       if (err < 0)
-               return err;
+       if (dc->soc->has_powergate) {
+               if (dc->pipe == 0)
+                       dc->powergate = TEGRA_POWERGATE_DIS;
+               else
+                       dc->powergate = TEGRA_POWERGATE_DISB;
+
+               err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+                                                       dc->rst);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to power partition: %d\n",
+                               err);
+                       return err;
+               }
+       } else {
+               err = clk_prepare_enable(dc->clk);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to enable clock: %d\n",
+                               err);
+                       return err;
+               }
+
+               err = reset_control_deassert(dc->rst);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to deassert reset: %d\n",
+                               err);
+                       return err;
+               }
+       }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dc->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1523,6 +1754,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
        }
 
        reset_control_assert(dc->rst);
+
+       if (dc->soc->has_powergate)
+               tegra_powergate_power_off(dc->powergate);
+
        clk_disable_unprepare(dc->clk);
 
        return 0;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 59736bb810cd2d4787dab77d1fea29dadcf30f9f..e549afeece1ff12c899afc9f21dd52ff5e3e64ff 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 
 #include "drm.h"
 #include "gem.h"
@@ -33,6 +34,17 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
        if (!tegra)
                return -ENOMEM;
 
+       if (iommu_present(&platform_bus_type)) {
+               tegra->domain = iommu_domain_alloc(&platform_bus_type);
+               if (IS_ERR(tegra->domain)) {
+                       err = PTR_ERR(tegra->domain);
+                       goto free;
+               }
+
+               DRM_DEBUG("IOMMU context initialized\n");
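+               /* The drm_mm range allocator hands out IOVA ranges (here
+                * the first 2 GiB) for GEM objects mapped into this
+                * shared domain. */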
+               drm_mm_init(&tegra->mm, 0, SZ_2G);
+       }
+
        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);
        drm->dev_private = tegra;
@@ -42,13 +54,13 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
-               return err;
+               goto config;
 
        drm_kms_helper_poll_init(drm);
 
        err = host1x_device_init(device);
        if (err < 0)
-               return err;
+               goto fbdev;
 
        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
@@ -59,18 +71,37 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
-               return err;
+               goto device;
 
        err = tegra_drm_fb_init(drm);
        if (err < 0)
-               return err;
+               goto vblank;
 
        return 0;
+
+vblank:
+       drm_vblank_cleanup(drm);
+device:
+       host1x_device_exit(device);
+fbdev:
+       drm_kms_helper_poll_fini(drm);
+       tegra_drm_fb_free(drm);
+config:
+       drm_mode_config_cleanup(drm);
+
+       if (tegra->domain) {
+               iommu_domain_free(tegra->domain);
+               drm_mm_takedown(&tegra->mm);
+       }
+free:
+       kfree(tegra);
+       return err;
 }
 
 static int tegra_drm_unload(struct drm_device *drm)
 {
        struct host1x_device *device = to_host1x_device(drm->dev);
+       struct tegra_drm *tegra = drm->dev_private;
        int err;
 
        drm_kms_helper_poll_fini(drm);
@@ -82,6 +113,13 @@ static int tegra_drm_unload(struct drm_device *drm)
        if (err < 0)
                return err;
 
+       if (tegra->domain) {
+               iommu_domain_free(tegra->domain);
+               drm_mm_takedown(&tegra->mm);
+       }
+
+       kfree(tegra);
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index e89c70fa82d554f3da8fced5ae99b840e56c985f..3a3b2e7b5b3f0f7902e03a32cdcaba65f20c8cdf 100644 (file)
@@ -39,6 +39,9 @@ struct tegra_fbdev {
 struct tegra_drm {
        struct drm_device *drm;
 
+       struct iommu_domain *domain;
+       struct drm_mm mm;
+
        struct mutex clients_lock;
        struct list_head clients;
 
@@ -101,6 +104,7 @@ struct tegra_dc {
        spinlock_t lock;
 
        struct drm_crtc base;
+       int powergate;
        int pipe;
 
        struct clk *clk;
@@ -120,6 +124,8 @@ struct tegra_dc {
        struct drm_pending_vblank_event *event;
 
        const struct tegra_dc_soc_info *soc;
+
+       struct iommu_domain *domain;
 };
 
 static inline struct tegra_dc *
@@ -133,16 +139,15 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
        return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
 }
 
-static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
-                                  unsigned long reg)
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+                                  unsigned long offset)
 {
-       writel(value, dc->regs + (reg << 2));
+       writel(value, dc->regs + (offset << 2));
 }
 
-static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
-                                          unsigned long reg)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
 {
-       return readl(dc->regs + (reg << 2));
+       return readl(dc->regs + (offset << 2));
 }
 
 struct tegra_dc_window {
@@ -287,6 +292,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
 int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
                        struct tegra_bo_tiling *tiling);
 int tegra_drm_fb_prepare(struct drm_device *drm);
+void tegra_drm_fb_free(struct drm_device *drm);
 int tegra_drm_fb_init(struct drm_device *drm);
 void tegra_drm_fb_exit(struct drm_device *drm);
 #ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f7874458926a7f289d8df1a3e46ead9f066e1a82..33f67fd601c6076670496617d51366050f327238 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/host1x.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
@@ -26,9 +27,6 @@
 #include "dsi.h"
 #include "mipi-phy.h"
 
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
 struct tegra_dsi {
        struct host1x_client client;
        struct tegra_output output;
@@ -54,6 +52,13 @@ struct tegra_dsi {
 
        struct regulator *vdd;
        bool enabled;
+
+       unsigned int video_fifo_depth;
+       unsigned int host_fifo_depth;
+
+       /* for ganged-mode support */
+       struct tegra_dsi *master;
+       struct tegra_dsi *slave;
 };
 
 static inline struct tegra_dsi *
@@ -318,6 +323,21 @@ static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
        [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
 };
 
+static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
+       [ 0] = 0,
+       [ 1] = 0,
+       [ 2] = 0,
+       [ 3] = 0,
+       [ 4] = 0,
+       [ 5] = 0,
+       [ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
+       [ 7] = 0,
+       [ 8] = 0,
+       [ 9] = 0,
+       [10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
+       [11] = 0,
+};
+
 static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
 {
        struct mipi_dphy_timing timing;
@@ -329,7 +349,7 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
        if (rate < 0)
                return rate;
 
-       period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+       period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2);
 
        err = mipi_dphy_timing_get_default(&timing, period);
        if (err < 0)
@@ -369,6 +389,9 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
                DSI_TIMING_FIELD(timing.tago, period, 1);
        tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
 
+       if (dsi->slave)
+               return tegra_dsi_set_phy_timing(dsi->slave);
+
        return 0;
 }
 
@@ -426,26 +449,59 @@ static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
        return 0;
 }
 
-static int tegra_output_dsi_enable(struct tegra_output *output)
+static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
+                                   unsigned int size)
+{
+       u32 value;
+
+       tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
+       tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
+
+       value = DSI_GANGED_MODE_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_enable(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+       value |= DSI_POWER_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+       if (dsi->slave)
+               tegra_dsi_enable(dsi->slave);
+}
+
+static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
+{
+       if (dsi->master)
+               return dsi->master->lanes + dsi->lanes;
+
+       if (dsi->slave)
+               return dsi->lanes + dsi->slave->lanes;
+
+       return dsi->lanes;
+}
+
+static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+                              const struct drm_display_mode *mode)
 {
-       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
-       struct drm_display_mode *mode = &dc->base.mode;
        unsigned int hact, hsw, hbp, hfp, i, mul, div;
-       struct tegra_dsi *dsi = to_dsi(output);
        enum tegra_dsi_format format;
-       unsigned long value;
        const u32 *pkt_seq;
+       u32 value;
        int err;
 
-       if (dsi->enabled)
-               return 0;
-
        if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
                DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
                pkt_seq = pkt_seq_video_non_burst_sync_pulses;
-       } else {
+       } else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
                DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
                pkt_seq = pkt_seq_video_non_burst_sync_events;
+       } else {
+               DRM_DEBUG_KMS("Command mode\n");
+               pkt_seq = pkt_seq_command_mode;
        }
 
        err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
@@ -456,61 +512,136 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
        if (err < 0)
                return err;
 
-       err = clk_enable(dsi->clk);
-       if (err < 0)
-               return err;
-
-       reset_control_deassert(dsi->rst);
-
        value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
                DSI_CONTROL_LANES(dsi->lanes - 1) |
-               DSI_CONTROL_SOURCE(dc->pipe);
+               DSI_CONTROL_SOURCE(pipe);
        tegra_dsi_writel(dsi, value, DSI_CONTROL);
 
-       tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+       tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
 
-       value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
-               DSI_HOST_CONTROL_ECC;
+       value = DSI_HOST_CONTROL_HS;
        tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
 
        value = tegra_dsi_readl(dsi, DSI_CONTROL);
+
        if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
                value |= DSI_CONTROL_HS_CLK_CTRL;
+
        value &= ~DSI_CONTROL_TX_TRIG(3);
-       value &= ~DSI_CONTROL_DCS_ENABLE;
+
+       /* enable DCS commands for command mode */
+       if (dsi->flags & MIPI_DSI_MODE_VIDEO)
+               value &= ~DSI_CONTROL_DCS_ENABLE;
+       else
+               value |= DSI_CONTROL_DCS_ENABLE;
+
        value |= DSI_CONTROL_VIDEO_ENABLE;
        value &= ~DSI_CONTROL_HOST_ENABLE;
        tegra_dsi_writel(dsi, value, DSI_CONTROL);
 
-       err = tegra_dsi_set_phy_timing(dsi);
-       if (err < 0)
-               return err;
-
        for (i = 0; i < NUM_PKT_SEQ; i++)
                tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
 
-       /* horizontal active pixels */
-       hact = mode->hdisplay * mul / div;
+       if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+               /* horizontal active pixels */
+               hact = mode->hdisplay * mul / div;
 
-       /* horizontal sync width */
-       hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-       hsw -= 10;
+               /* horizontal sync width */
+               hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+               hsw -= 10;
 
-       /* horizontal back porch */
-       hbp = (mode->htotal - mode->hsync_end) * mul / div;
-       hbp -= 14;
+               /* horizontal back porch */
+               hbp = (mode->htotal - mode->hsync_end) * mul / div;
+               hbp -= 14;
 
-       /* horizontal front porch */
-       hfp = (mode->hsync_start  - mode->hdisplay) * mul / div;
-       hfp -= 8;
+               /* horizontal front porch */
+               hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+               hfp -= 8;
 
-       tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
-       tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
-       tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
-       tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+               tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+               tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+               tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+               tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
 
-       /* set SOL delay */
-       tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+               /* set SOL delay (for non-burst mode only) */
+               tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+               /* TODO: implement ganged mode */
+       } else {
+               u16 bytes;
+
+               if (dsi->master || dsi->slave) {
+                       /*
+                        * For ganged mode, assume a symmetric left-right split.
+                        */
+                       bytes = 1 + (mode->hdisplay / 2) * mul / div;
+               } else {
+                       /* 1 byte (DCS command) + pixel data */
+                       bytes = 1 + mode->hdisplay * mul / div;
+               }
+
+               tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
+               tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
+               tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
+               tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
+
+               value = MIPI_DCS_WRITE_MEMORY_START << 8 |
+                       MIPI_DCS_WRITE_MEMORY_CONTINUE;
+               tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+
+               /* set SOL delay */
+               if (dsi->master || dsi->slave) {
+                       unsigned int lanes = tegra_dsi_get_lanes(dsi);
+                       unsigned long delay, bclk, bclk_ganged;
+
+                       /* SOL to valid, valid to FIFO and FIFO write delay */
+                       delay = 4 + 4 + 2;
+                       delay = DIV_ROUND_UP(delay * mul, div * lanes);
+                       /* FIFO read delay */
+                       delay = delay + 6;
+
+                       bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+                       bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+                       value = bclk - bclk_ganged + delay + 20;
+               } else {
+                       /* TODO: revisit for non-ganged mode */
+                       value = 8 * mul / div;
+               }
+
+               tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+       }
+
+       if (dsi->slave) {
+               err = tegra_dsi_configure(dsi->slave, pipe, mode);
+               if (err < 0)
+                       return err;
+
+               /*
+                * TODO: Support modes other than symmetrical left-right
+                * split.
+                */
+               tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
+               tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
+                                       mode->hdisplay / 2);
+       }
+
+       return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       const struct drm_display_mode *mode = &dc->base.mode;
+       struct tegra_dsi *dsi = to_dsi(output);
+       u32 value;
+       int err;
+
+       if (dsi->enabled)
+               return 0;
+
+       err = tegra_dsi_configure(dsi, dc->pipe, mode);
+       if (err < 0)
+               return err;
 
        /* enable display controller */
        value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
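The horizontal timing and ganged-mode code above converts pixel counts to byte counts through the mul/div ratio and splits the active line evenly between the two controllers. A worked example with assumed 1080p-style CEA timings and RGB888 (3 bytes per pixel, so mul/div = 3/1):

#include <stdio.h>

int main(void)
{
	unsigned int mul = 3, div = 1;	/* RGB888: 3 bytes per pixel */
	unsigned int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052,
		     htotal = 2200;	/* assumed mode timings */
	unsigned int hact = hdisplay * mul / div;
	unsigned int hsw = (hsync_end - hsync_start) * mul / div - 10;
	unsigned int hbp = (htotal - hsync_end) * mul / div - 14;
	unsigned int hfp = (hsync_start - hdisplay) * mul / div - 8;
	unsigned int half = hdisplay / 2;

	printf("hact=%u hsw=%u hbp=%u hfp=%u (bytes)\n", hact, hsw, hbp, hfp);
	printf("ganged: master start=0, slave start=%u, size reg=0x%08x\n",
	       half, half << 16 | half);
	return 0;
}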
@@ -531,28 +662,79 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
        tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
        /* enable DSI controller */
-       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-       value |= DSI_POWER_CONTROL_ENABLE;
-       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+       tegra_dsi_enable(dsi);
 
        dsi->enabled = true;
 
        return 0;
 }
 
+static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
+{
+       u32 value;
+
+       timeout = jiffies + msecs_to_jiffies(timeout);
+
+       while (time_before(jiffies, timeout)) {
+               value = tegra_dsi_readl(dsi, DSI_STATUS);
+               if (value & DSI_STATUS_IDLE)
+                       return 0;
+
+               usleep_range(1000, 2000);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       value = tegra_dsi_readl(dsi, DSI_CONTROL);
+       value &= ~DSI_CONTROL_VIDEO_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       if (dsi->slave)
+               tegra_dsi_video_disable(dsi->slave);
+}
+
+static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
+{
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_disable(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       if (dsi->slave) {
+               tegra_dsi_ganged_disable(dsi->slave);
+               tegra_dsi_ganged_disable(dsi);
+       }
+
+       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+       value &= ~DSI_POWER_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+       if (dsi->slave)
+               tegra_dsi_disable(dsi->slave);
+
+       usleep_range(5000, 10000);
+}
+
 static int tegra_output_dsi_disable(struct tegra_output *output)
 {
        struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
        struct tegra_dsi *dsi = to_dsi(output);
        unsigned long value;
+       int err;
 
        if (!dsi->enabled)
                return 0;
 
-       /* disable DSI controller */
-       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-       value &= ~DSI_POWER_CONTROL_ENABLE;
-       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+       tegra_dsi_video_disable(dsi);
 
        /*
         * The following accesses registers of the display controller, so make
@@ -576,39 +758,68 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
                tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
        }
 
-       clk_disable(dsi->clk);
+       err = tegra_dsi_wait_idle(dsi, 100);
+       if (err < 0)
+               dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+       tegra_dsi_disable(dsi);
 
        dsi->enabled = false;
 
        return 0;
 }
 
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+                                 unsigned int vrefresh)
+{
+       unsigned int timeout;
+       u32 value;
+
+       /* one frame high-speed transmission timeout */
+       timeout = (bclk / vrefresh) / 512;
+       value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+       /* 2 ms peripheral timeout for panel */
+       timeout = 2 * bclk / 512 / 1000;
+       value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+       value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+       tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+       if (dsi->slave)
+               tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
 static int tegra_output_dsi_setup_clock(struct tegra_output *output,
                                        struct clk *clk, unsigned long pclk,
                                        unsigned int *divp)
 {
        struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
        struct drm_display_mode *mode = &dc->base.mode;
-       unsigned int timeout, mul, div, vrefresh;
        struct tegra_dsi *dsi = to_dsi(output);
-       unsigned long bclk, plld, value;
+       unsigned int mul, div, vrefresh, lanes;
+       unsigned long bclk, plld;
        int err;
 
+       lanes = tegra_dsi_get_lanes(dsi);
+
        err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
        if (err < 0)
                return err;
 
-       DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
+       DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes);
        vrefresh = drm_mode_vrefresh(mode);
        DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
 
        /* compute byte clock */
-       bclk = (pclk * mul) / (div * dsi->lanes);
+       bclk = (pclk * mul) / (div * lanes);
 
        /*
         * Compute bit clock and round up to the next MHz.
         */
-       plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
+       plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
 
        /*
         * We divide the frequency by two here, but we make up for that by
@@ -640,25 +851,17 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
         * not working properly otherwise. Perhaps the PLLs cannot generate
         * frequencies sufficiently high.
         */
-       *divp = ((8 * mul) / (div * dsi->lanes)) - 2;
+       *divp = ((8 * mul) / (div * lanes)) - 2;
 
        /*
         * XXX: Move the below somewhere else so that we don't need to have
         * access to the vrefresh in this function?
         */
+       tegra_dsi_set_timeout(dsi, bclk, vrefresh);
 
-       /* one frame high-speed transmission timeout */
-       timeout = (bclk / vrefresh) / 512;
-       value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
-       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
-
-       /* 2 ms peripheral timeout for panel */
-       timeout = 2 * bclk / 512 * 1000;
-       value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
-       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
-
-       value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
-       tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+       err = tegra_dsi_set_phy_timing(dsi);
+       if (err < 0)
+               return err;
 
        return 0;
 }
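The byte clock, PLL target and shift-clock divider above reduce to a few integer operations. A standalone run-through with assumed inputs (66 MHz pixel clock, RGB888, four lanes, 60 Hz) that also shows the one-frame HTX timeout tegra_dsi_set_timeout() derives from the same byte clock:

#include <stdio.h>

#define USEC_PER_SEC		1000000UL
#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long pclk = 66000000UL;	/* assumed pixel clock */
	unsigned int mul = 3, div = 1;		/* RGB888 */
	unsigned int lanes = 4, vrefresh = 60;
	unsigned long bclk = (pclk * mul) / (div * lanes);
	unsigned long plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
	unsigned int divp = ((8 * mul) / (div * lanes)) - 2;
	unsigned long htx = (bclk / vrefresh) / 512;	/* one-frame timeout */

	printf("bclk=%lu Hz plld=%lu Hz divp=%u htx=%lu\n",
	       bclk, plld, divp, htx);
	return 0;
}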
@@ -695,7 +898,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
 
 static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
 {
-       unsigned long value;
+       u32 value;
 
        tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
        tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
@@ -720,14 +923,17 @@ static int tegra_dsi_init(struct host1x_client *client)
        struct tegra_dsi *dsi = host1x_client_to_dsi(client);
        int err;
 
-       dsi->output.type = TEGRA_OUTPUT_DSI;
-       dsi->output.dev = client->dev;
-       dsi->output.ops = &dsi_ops;
-
-       err = tegra_output_init(drm, &dsi->output);
-       if (err < 0) {
-               dev_err(client->dev, "output setup failed: %d\n", err);
-               return err;
+       /* Gangsters must not register their own outputs. */
+       if (!dsi->master) {
+               dsi->output.type = TEGRA_OUTPUT_DSI;
+               dsi->output.dev = client->dev;
+               dsi->output.ops = &dsi_ops;
+
+               err = tegra_output_init(drm, &dsi->output);
+               if (err < 0) {
+                       dev_err(client->dev, "output setup failed: %d\n", err);
+                       return err;
+               }
        }
 
        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
@@ -736,12 +942,6 @@ static int tegra_dsi_init(struct host1x_client *client)
                        dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
        }
 
-       err = tegra_dsi_pad_calibrate(dsi);
-       if (err < 0) {
-               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
-               return err;
-       }
-
        return 0;
 }
 
@@ -756,16 +956,20 @@ static int tegra_dsi_exit(struct host1x_client *client)
                        dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
        }
 
-       err = tegra_output_disable(&dsi->output);
-       if (err < 0) {
-               dev_err(client->dev, "output failed to disable: %d\n", err);
-               return err;
-       }
-
-       err = tegra_output_exit(&dsi->output);
-       if (err < 0) {
-               dev_err(client->dev, "output cleanup failed: %d\n", err);
-               return err;
+       if (!dsi->master) {
+               err = tegra_output_disable(&dsi->output);
+               if (err < 0) {
+                       dev_err(client->dev, "output failed to disable: %d\n",
+                               err);
+                       return err;
+               }
+
+               err = tegra_output_exit(&dsi->output);
+               if (err < 0) {
+                       dev_err(client->dev, "output cleanup failed: %d\n",
+                               err);
+                       return err;
+               }
        }
 
        return 0;
@@ -792,20 +996,324 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
        return 0;
 }
 
+static const char * const error_report[16] = {
+       "SoT Error",
+       "SoT Sync Error",
+       "EoT Sync Error",
+       "Escape Mode Entry Command Error",
+       "Low-Power Transmit Sync Error",
+       "Peripheral Timeout Error",
+       "False Control Error",
+       "Contention Detected",
+       "ECC Error, single-bit",
+       "ECC Error, multi-bit",
+       "Checksum Error",
+       "DSI Data Type Not Recognized",
+       "DSI VC ID Invalid",
+       "Invalid Transmission Length",
+       "Reserved",
+       "DSI Protocol Violation",
+};
+
+static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
+                                      const struct mipi_dsi_msg *msg,
+                                      size_t count)
+{
+       u8 *rx = msg->rx_buf;
+       unsigned int i, j, k;
+       size_t size = 0;
+       u16 errors;
+       u32 value;
+
+       /* read and parse packet header */
+       value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+       switch (value & 0x3f) {
+       case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+               errors = (value >> 8) & 0xffff;
+               dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
+                       errors);
+               for (i = 0; i < ARRAY_SIZE(error_report); i++)
+                       if (errors & BIT(i))
+                               dev_dbg(dsi->dev, "  %2u: %s\n", i,
+                                       error_report[i]);
+               break;
+
+       case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+               rx[0] = (value >> 8) & 0xff;
+               size = 1;
+               break;
+
+       case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+               rx[0] = (value >>  8) & 0xff;
+               rx[1] = (value >> 16) & 0xff;
+               size = 2;
+               break;
+
+       case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+               size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+               break;
+
+       case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+               size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+               break;
+
+       default:
+               dev_err(dsi->dev, "unhandled response type: %02x\n",
+                       value & 0x3f);
+               return -EPROTO;
+       }
+
+       size = min(size, msg->rx_len);
+
+       if (msg->rx_buf && size > 0) {
+               for (i = 0, j = 0; i < count - 1; i++, j += 4) {
+                       u8 *chunk = msg->rx_buf + j;
+
+                       value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+                       for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
+                               chunk[k] = (value >> (k << 3)) & 0xff;
+               }
+       }
+
+       return size;
+}
+
+static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
+{
+       tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
+
+       timeout = jiffies + msecs_to_jiffies(timeout);
+
+       while (time_before(jiffies, timeout)) {
+               u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+               if ((value & DSI_TRIGGER_HOST) == 0)
+                       return 0;
+
+               usleep_range(1000, 2000);
+       }
+
+       DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
+       return -ETIMEDOUT;
+}
+
+static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
+                                      unsigned long timeout)
+{
+       timeout = jiffies + msecs_to_jiffies(timeout);
+
+       while (time_before(jiffies, timeout)) {
+               u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
+               u8 count = value & 0x1f;
+
+               if (count > 0)
+                       return count;
+
+               usleep_range(1000, 2000);
+       }
+
+       DRM_DEBUG_KMS("peripheral returned no data\n");
+       return -ETIMEDOUT;
+}
+
+static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
+                             const void *buffer, size_t size)
+{
+       const u8 *buf = buffer;
+       size_t i, j;
+       u32 value;
+
+       for (j = 0; j < size; j += 4) {
+               value = 0;
+
+               for (i = 0; i < 4 && j + i < size; i++)
+                       value |= buf[j + i] << (i << 3);
+
+               tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+       }
+}
+
+static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
+                                      const struct mipi_dsi_msg *msg)
+{
+       struct tegra_dsi *dsi = host_to_tegra(host);
+       struct mipi_dsi_packet packet;
+       const u8 *header;
+       size_t count;
+       ssize_t err;
+       u32 value;
+
+       err = mipi_dsi_create_packet(&packet, msg);
+       if (err < 0)
+               return err;
+
+       header = packet.header;
+
+       /* maximum FIFO depth is 1920 words */
+       if (packet.size > dsi->video_fifo_depth * 4)
+               return -ENOSPC;
+
+       /* reset underflow/overflow flags */
+       value = tegra_dsi_readl(dsi, DSI_STATUS);
+       if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
+               value = DSI_HOST_CONTROL_FIFO_RESET;
+               tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+               usleep_range(10, 20);
+       }
+
+       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+       value |= DSI_POWER_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+       usleep_range(5000, 10000);
+
+       value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
+               DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
+
+       if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
+               value |= DSI_HOST_CONTROL_HS;
+
+       /*
+        * The host FIFO has a maximum of 64 words, so larger transmissions
+        * need to use the video FIFO.
+        */
+       if (packet.size > dsi->host_fifo_depth * 4)
+               value |= DSI_HOST_CONTROL_FIFO_SEL;
+
+       tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+       /*
+        * For reads and messages with explicitly requested ACK, generate a
+        * BTA sequence after the transmission of the packet.
+        */
+       if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+           (msg->rx_buf && msg->rx_len > 0)) {
+               value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+               value |= DSI_HOST_CONTROL_PKT_BTA;
+               tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+       }
+
+       value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       /* write packet header, ECC is generated by hardware */
+       value = header[2] << 16 | header[1] << 8 | header[0];
+       tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+
+       /* write payload (if any) */
+       if (packet.payload_length > 0)
+               tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
+                                 packet.payload_length);
+
+       err = tegra_dsi_transmit(dsi, 250);
+       if (err < 0)
+               return err;
+
+       if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+           (msg->rx_buf && msg->rx_len > 0)) {
+               err = tegra_dsi_wait_for_response(dsi, 250);
+               if (err < 0)
+                       return err;
+
+               count = err;
+
+               value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+               switch (value) {
+               case 0x84:
+                       /* acknowledge */
+                       break;
+
+               case 0x87:
+                       /* escape */
+                       break;
+
+               default:
+                       dev_err(dsi->dev, "unknown status: %08x\n", value);
+                       break;
+               }
+
+               if (count > 1) {
+                       err = tegra_dsi_read_response(dsi, msg, count);
+                       if (err < 0)
+                               dev_err(dsi->dev,
+                                       "failed to parse response: %zd\n",
+                                       err);
+                       else {
+                               /*
+                                * For read commands, return the number of
+                                * bytes returned by the peripheral.
+                                */
+                               count = err;
+                       }
+               }
+       } else {
+               /*
+                * For write commands, we have transmitted the 4-byte header
+                * plus the variable-length payload.
+                */
+               count = 4 + packet.payload_length;
+       }
+
+       return count;
+}
+
+static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
+{
+       struct clk *parent;
+       int err;
+
+       /* make sure both DSI controllers share the same PLL */
+       parent = clk_get_parent(dsi->slave->clk);
+       if (!parent)
+               return -EINVAL;
+
+       err = clk_set_parent(parent, dsi->clk_parent);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
 static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
                                 struct mipi_dsi_device *device)
 {
        struct tegra_dsi *dsi = host_to_tegra(host);
-       struct tegra_output *output = &dsi->output;
 
        dsi->flags = device->mode_flags;
        dsi->format = device->format;
        dsi->lanes = device->lanes;
 
-       output->panel = of_drm_find_panel(device->dev.of_node);
-       if (output->panel) {
-               if (output->connector.dev)
+       if (dsi->slave) {
+               int err;
+
+               dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
+                       dev_name(&device->dev));
+
+               err = tegra_dsi_ganged_setup(dsi);
+               if (err < 0) {
+                       dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
+                               err);
+                       return err;
+               }
+       }
+
+       /*
+        * Slaves don't have a panel associated with them, so they merely
+        * provide the second channel.
+        */
+       if (!dsi->master) {
+               struct tegra_output *output = &dsi->output;
+
+               output->panel = of_drm_find_panel(device->dev.of_node);
+               if (output->panel && output->connector.dev) {
+                       drm_panel_attach(output->panel, &output->connector);
                        drm_helper_hpd_irq_event(output->connector.dev);
+               }
        }
 
        return 0;
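For context, the transfer path added above is driven by the struct mipi_dsi_msg a peripheral driver submits. A hedged kernel-context sketch (driver boilerplate omitted; calling ops->transfer directly is for illustration only) of a DCS short write, where the command byte travels in the packet header and the return value is 4 header bytes plus 0 payload bytes on success:

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int example_exit_sleep(struct mipi_dsi_device *device)
{
	u8 cmd = MIPI_DCS_EXIT_SLEEP_MODE;	/* 0x11 */
	struct mipi_dsi_msg msg = {
		.channel = device->channel,
		.type = MIPI_DSI_DCS_SHORT_WRITE,
		.flags = MIPI_DSI_MSG_USE_LPM,	/* transmit in low-power mode */
		.tx_buf = &cmd,
		.tx_len = 1,
	};
	ssize_t err = device->host->ops->transfer(device->host, &msg);

	return err < 0 ? err : 0;
}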
@@ -818,10 +1326,10 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
        struct tegra_output *output = &dsi->output;
 
        if (output->panel && &device->dev == output->panel->dev) {
+               output->panel = NULL;
+
                if (output->connector.dev)
                        drm_helper_hpd_irq_event(output->connector.dev);
-
-               output->panel = NULL;
        }
 
        return 0;
@@ -830,8 +1338,29 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
 static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
        .attach = tegra_dsi_host_attach,
        .detach = tegra_dsi_host_detach,
+       .transfer = tegra_dsi_host_transfer,
 };
 
+static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+{
+       struct device_node *np;
+
+       np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+       if (np) {
+               struct platform_device *gangster = of_find_device_by_node(np);
+
+               dsi->slave = platform_get_drvdata(gangster);
+               of_node_put(np);
+
+               if (!dsi->slave)
+                       return -EPROBE_DEFER;
+
+               dsi->slave->master = dsi;
+       }
+
+       return 0;
+}
+
 static int tegra_dsi_probe(struct platform_device *pdev)
 {
        struct tegra_dsi *dsi;
@@ -843,11 +1372,19 @@ static int tegra_dsi_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        dsi->output.dev = dsi->dev = &pdev->dev;
+       dsi->video_fifo_depth = 1920;
+       dsi->host_fifo_depth = 64;
+
+       err = tegra_dsi_ganged_probe(dsi);
+       if (err < 0)
+               return err;
 
        err = tegra_output_probe(&dsi->output);
        if (err < 0)
                return err;
 
+       dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
+
        /*
         * Assume these values by default. When a DSI peripheral driver
         * attaches to the DSI host, the parameters will be taken from
@@ -861,68 +1398,83 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        if (IS_ERR(dsi->rst))
                return PTR_ERR(dsi->rst);
 
+       err = reset_control_deassert(dsi->rst);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n",
+                       err);
+               return err;
+       }
+
        dsi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dsi->clk)) {
                dev_err(&pdev->dev, "cannot get DSI clock\n");
-               return PTR_ERR(dsi->clk);
+               err = PTR_ERR(dsi->clk);
+               goto reset;
        }
 
        err = clk_prepare_enable(dsi->clk);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot enable DSI clock\n");
-               return err;
+               goto reset;
        }
 
        dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
        if (IS_ERR(dsi->clk_lp)) {
                dev_err(&pdev->dev, "cannot get low-power clock\n");
-               return PTR_ERR(dsi->clk_lp);
+               err = PTR_ERR(dsi->clk_lp);
+               goto disable_clk;
        }
 
        err = clk_prepare_enable(dsi->clk_lp);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot enable low-power clock\n");
-               return err;
+               goto disable_clk;
        }
 
        dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(dsi->clk_parent)) {
                dev_err(&pdev->dev, "cannot get parent clock\n");
-               return PTR_ERR(dsi->clk_parent);
-       }
-
-       err = clk_prepare_enable(dsi->clk_parent);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable parent clock\n");
-               return err;
+               err = PTR_ERR(dsi->clk_parent);
+               goto disable_clk_lp;
        }
 
        dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
        if (IS_ERR(dsi->vdd)) {
                dev_err(&pdev->dev, "cannot get VDD supply\n");
-               return PTR_ERR(dsi->vdd);
+               err = PTR_ERR(dsi->vdd);
+               goto disable_clk_lp;
        }
 
        err = regulator_enable(dsi->vdd);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot enable VDD supply\n");
-               return err;
+               goto disable_clk_lp;
        }
 
        err = tegra_dsi_setup_clocks(dsi);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot setup clocks\n");
-               return err;
+               goto disable_vdd;
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
-       if (IS_ERR(dsi->regs))
-               return PTR_ERR(dsi->regs);
+       if (IS_ERR(dsi->regs)) {
+               err = PTR_ERR(dsi->regs);
+               goto disable_vdd;
+       }
 
        dsi->mipi = tegra_mipi_request(&pdev->dev);
-       if (IS_ERR(dsi->mipi))
-               return PTR_ERR(dsi->mipi);
+       if (IS_ERR(dsi->mipi)) {
+               err = PTR_ERR(dsi->mipi);
+               goto disable_vdd;
+       }
+
+       err = tegra_dsi_pad_calibrate(dsi);
+       if (err < 0) {
+               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+               goto mipi_free;
+       }
 
        dsi->host.ops = &tegra_dsi_host_ops;
        dsi->host.dev = &pdev->dev;
@@ -930,7 +1482,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        err = mipi_dsi_host_register(&dsi->host);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
-               return err;
+               goto mipi_free;
        }
 
        INIT_LIST_HEAD(&dsi->client.list);
@@ -941,12 +1493,26 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
-               return err;
+               goto unregister;
        }
 
        platform_set_drvdata(pdev, dsi);
 
        return 0;
+
+unregister:
+       mipi_dsi_host_unregister(&dsi->host);
+mipi_free:
+       tegra_mipi_free(dsi->mipi);
+disable_vdd:
+       regulator_disable(dsi->vdd);
+disable_clk_lp:
+       clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+       clk_disable_unprepare(dsi->clk);
+reset:
+       reset_control_assert(dsi->rst);
+       return err;
 }
 
 static int tegra_dsi_remove(struct platform_device *pdev)
@@ -965,7 +1531,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
        tegra_mipi_free(dsi->mipi);
 
        regulator_disable(dsi->vdd);
-       clk_disable_unprepare(dsi->clk_parent);
        clk_disable_unprepare(dsi->clk_lp);
        clk_disable_unprepare(dsi->clk);
        reset_control_assert(dsi->rst);
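The reworked probe above replaces early returns with a goto ladder; the discipline is that each label undoes exactly the steps that succeeded before the failure, in reverse order of acquisition. A generic standalone sketch of the shape (resource names hypothetical):

/* hypothetical stubs standing in for reset/clock/regulator acquisition */
static int grab_reset(void) { return 0; }
static int grab_clock(void) { return 0; }
static int grab_regulator(void) { return 0; }
static void drop_clock(void) { }
static void drop_reset(void) { }

static int example_probe(void)
{
	int err;

	err = grab_reset();
	if (err < 0)
		return err;

	err = grab_clock();
	if (err < 0)
		goto err_reset;

	err = grab_regulator();
	if (err < 0)
		goto err_clock;

	return 0;

err_clock:
	drop_clock();
err_reset:
	drop_reset();
	return err;
}

int main(void)
{
	return example_probe();
}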
index 5ce610d08d770afa2b9a26087a8c2055ec772edc..bad1006a51509da2868d83115245cb742ec29385 100644 (file)
 #define DSI_INT_STATUS                 0x0d
 #define DSI_INT_MASK                   0x0e
 #define DSI_HOST_CONTROL               0x0f
+#define DSI_HOST_CONTROL_FIFO_RESET    (1 << 21)
+#define DSI_HOST_CONTROL_CRC_RESET     (1 << 20)
+#define DSI_HOST_CONTROL_TX_TRIG_SOL   (0 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_FIFO  (1 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_HOST  (2 << 12)
 #define DSI_HOST_CONTROL_RAW           (1 << 6)
 #define DSI_HOST_CONTROL_HS            (1 << 5)
-#define DSI_HOST_CONTROL_BTA           (1 << 2)
+#define DSI_HOST_CONTROL_FIFO_SEL      (1 << 4)
+#define DSI_HOST_CONTROL_IMM_BTA       (1 << 3)
+#define DSI_HOST_CONTROL_PKT_BTA       (1 << 2)
 #define DSI_HOST_CONTROL_CS            (1 << 1)
 #define DSI_HOST_CONTROL_ECC           (1 << 0)
 #define DSI_CONTROL                    0x10
 #define DSI_SOL_DELAY                  0x11
 #define DSI_MAX_THRESHOLD              0x12
 #define DSI_TRIGGER                    0x13
+#define DSI_TRIGGER_HOST               (1 << 1)
+#define DSI_TRIGGER_VIDEO              (1 << 0)
 #define DSI_TX_CRC                     0x14
 #define DSI_STATUS                     0x15
 #define DSI_STATUS_IDLE                        (1 << 10)
+#define DSI_STATUS_UNDERFLOW           (1 <<  9)
+#define DSI_STATUS_OVERFLOW            (1 <<  8)
 #define DSI_INIT_SEQ_CONTROL           0x1a
 #define DSI_INIT_SEQ_DATA_0            0x1b
 #define DSI_INIT_SEQ_DATA_1            0x1c
 #define DSI_PAD_CONTROL_3              0x51
 #define DSI_PAD_CONTROL_4              0x52
 #define DSI_GANGED_MODE_CONTROL                0x53
+#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
 #define DSI_GANGED_MODE_START          0x54
 #define DSI_GANGED_MODE_SIZE           0x55
 #define DSI_RAW_DATA_BYTE_COUNT                0x56
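The three DSI_HOST_CONTROL_TX_TRIG_* values above encode a two-bit trigger-source field at bits 13:12 rather than independent flags, so the field must be cleared before a value is chosen, the same clear-then-set discipline the driver applies to DSI_CONTROL's trigger field. An illustrative standalone read-modify-write:

#include <stdio.h>

#define DSI_HOST_CONTROL_TX_TRIG_HOST	(2 << 12)

int main(void)
{
	unsigned int value = 0xffffffffu;

	value &= ~(3u << 12);			/* clear both field bits */
	value |= DSI_HOST_CONTROL_TX_TRIG_HOST;	/* select host trigger */
	printf("0x%08x\n", value);		/* bits 13:12 now contain 2 */
	return 0;
}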
index 3513d12d5aa1447822a7ef57449799b1aff83df9..e9c715d892614853606c7563c4b335cb300d8c5f 100644 (file)
@@ -65,8 +65,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
        for (i = 0; i < fb->num_planes; i++) {
                struct tegra_bo *bo = fb->planes[i];
 
-               if (bo)
+               if (bo) {
+                       if (bo->pages && bo->vaddr)
+                               vunmap(bo->vaddr);
+
                        drm_gem_object_unreference_unlocked(&bo->gem);
+               }
        }
 
        drm_framebuffer_cleanup(framebuffer);
@@ -223,14 +227,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
        info = framebuffer_alloc(0, drm->dev);
        if (!info) {
                dev_err(drm->dev, "failed to allocate framebuffer info\n");
-               tegra_bo_free_object(&bo->gem);
+               drm_gem_object_unreference_unlocked(&bo->gem);
                return -ENOMEM;
        }
 
        fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
        if (IS_ERR(fbdev->fb)) {
-               dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
                err = PTR_ERR(fbdev->fb);
+               dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+                       err);
+               drm_gem_object_unreference_unlocked(&bo->gem);
                goto release;
        }
 
@@ -254,6 +260,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
        offset = info->var.xoffset * bytes_per_pixel +
                 info->var.yoffset * fb->pitches[0];
 
+       if (bo->pages) {
+               bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
+                                pgprot_writecombine(PAGE_KERNEL));
+               if (!bo->vaddr) {
+                       dev_err(drm->dev, "failed to vmap() framebuffer\n");
+                       err = -ENOMEM;
+                       goto destroy;
+               }
+       }
+
        drm->mode_config.fb_base = (resource_size_t)bo->paddr;
        info->screen_base = (void __iomem *)bo->vaddr + offset;
        info->screen_size = size;
@@ -289,6 +305,11 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
        return fbdev;
 }
 
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+       kfree(fbdev);
+}
+
 static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
                            unsigned int preferred_bpp,
                            unsigned int num_crtc,
@@ -299,19 +320,21 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
 
        err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
        if (err < 0) {
-               dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+               dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
+                       err);
                return err;
        }
 
        err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
        if (err < 0) {
-               dev_err(drm->dev, "failed to add connectors\n");
+               dev_err(drm->dev, "failed to add connectors: %d\n", err);
                goto fini;
        }
 
        err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
        if (err < 0) {
-               dev_err(drm->dev, "failed to set initial configuration\n");
+               dev_err(drm->dev, "failed to set initial configuration: %d\n",
+                       err);
                goto fini;
        }
 
@@ -322,7 +345,7 @@ fini:
        return err;
 }
 
-static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
        struct fb_info *info = fbdev->base.fbdev;
 
@@ -341,11 +364,11 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
 
        if (fbdev->fb) {
                drm_framebuffer_unregister_private(&fbdev->fb->base);
-               tegra_fb_destroy(&fbdev->fb->base);
+               drm_framebuffer_remove(&fbdev->fb->base);
        }
 
        drm_fb_helper_fini(&fbdev->base);
-       kfree(fbdev);
+       tegra_fbdev_free(fbdev);
 }
 
 void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
@@ -393,6 +416,15 @@ int tegra_drm_fb_prepare(struct drm_device *drm)
        return 0;
 }
 
+void tegra_drm_fb_free(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+       struct tegra_drm *tegra = drm->dev_private;
+
+       tegra_fbdev_free(tegra->fbdev);
+#endif
+}
+
 int tegra_drm_fb_init(struct drm_device *drm)
 {
 #ifdef CONFIG_DRM_TEGRA_FBDEV
@@ -413,6 +445,6 @@ void tegra_drm_fb_exit(struct drm_device *drm)
 #ifdef CONFIG_DRM_TEGRA_FBDEV
        struct tegra_drm *tegra = drm->dev_private;
 
-       tegra_fbdev_free(tegra->fbdev);
+       tegra_fbdev_exit(tegra->fbdev);
 #endif
 }
index ce023fa3e8ae14bdfc3557295d69a7d8d12f9cd3..da32086cbeaf28bbe0c0528eeeac4c0bb9fcb326 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/dma-buf.h>
+#include <linux/iommu.h>
 #include <drm/tegra_drm.h>
 
 #include "drm.h"
@@ -91,13 +92,90 @@ static const struct host1x_bo_ops tegra_bo_ops = {
        .kunmap = tegra_bo_kunmap,
 };
 
-static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+/*
+ * A generic iommu_map_sg() function is being reviewed and will hopefully be
+ * merged soon. At that point this function can be dropped in favour of the
+ * one provided by the IOMMU API.
+ */
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+                             struct scatterlist *sg, unsigned int nents,
+                             int prot)
 {
-       dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+       struct scatterlist *s;
+       size_t offset = 0;
+       unsigned int i;
+       int err;
+
+       for_each_sg(sg, s, nents, i) {
+               phys_addr_t phys = page_to_phys(sg_page(s));
+               size_t length = s->offset + s->length;
+
+               err = iommu_map(domain, iova + offset, phys, length, prot);
+               if (err < 0) {
+                       iommu_unmap(domain, iova, offset);
+                       return err;
+               }
+
+               offset += length;
+       }
+
+       return offset;
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
-                                unsigned long flags)
+static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+       int prot = IOMMU_READ | IOMMU_WRITE;
+       ssize_t err;
+
+       if (bo->mm)
+               return -EBUSY;
+
+       bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
+       if (!bo->mm)
+               return -ENOMEM;
+
+       err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
+                                        PAGE_SIZE, 0, 0, 0);
+       if (err < 0) {
+               dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
+                       err);
+               goto free;
+       }
+
+       bo->paddr = bo->mm->start;
+
+       err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+                            bo->sgt->nents, prot);
+       if (err < 0) {
+               dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
+               goto remove;
+       }
+
+       bo->size = err;
+
+       return 0;
+
+remove:
+       drm_mm_remove_node(bo->mm);
+free:
+       kfree(bo->mm);
+       return err;
+}
+
+static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+       if (!bo->mm)
+               return 0;
+
+       iommu_unmap(tegra->domain, bo->paddr, bo->size);
+       drm_mm_remove_node(bo->mm);
+       kfree(bo->mm);
+
+       return 0;
+}
+
+static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
+                                             size_t size)
 {
        struct tegra_bo *bo;
        int err;
@@ -109,22 +187,96 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);
 
-       bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
-                                          GFP_KERNEL | __GFP_NOWARN);
-       if (!bo->vaddr) {
-               dev_err(drm->dev, "failed to allocate buffer with size %u\n",
-                       size);
-               err = -ENOMEM;
-               goto err_dma;
-       }
-
        err = drm_gem_object_init(drm, &bo->gem, size);
-       if (err)
-               goto err_init;
+       if (err < 0)
+               goto free;
 
        err = drm_gem_create_mmap_offset(&bo->gem);
-       if (err)
-               goto err_mmap;
+       if (err < 0)
+               goto release;
+
+       return bo;
+
+release:
+       drm_gem_object_release(&bo->gem);
+free:
+       kfree(bo);
+       return ERR_PTR(err);
+}
+
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
+{
+       if (bo->pages) {
+               drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+               sg_free_table(bo->sgt);
+               kfree(bo->sgt);
+       } else if (bo->vaddr) {
+               dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
+                                     bo->paddr);
+       }
+}
+
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
+                             size_t size)
+{
+       bo->pages = drm_gem_get_pages(&bo->gem);
+       if (IS_ERR(bo->pages))
+               return PTR_ERR(bo->pages);
+
+       bo->num_pages = size >> PAGE_SHIFT;
+
+       bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+       if (IS_ERR(bo->sgt)) {
+               drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+               return PTR_ERR(bo->sgt);
+       }
+
+       return 0;
+}
+
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
+                         size_t size)
+{
+       struct tegra_drm *tegra = drm->dev_private;
+       int err;
+
+       if (tegra->domain) {
+               err = tegra_bo_get_pages(drm, bo, size);
+               if (err < 0)
+                       return err;
+
+               err = tegra_bo_iommu_map(tegra, bo);
+               if (err < 0) {
+                       tegra_bo_free(drm, bo);
+                       return err;
+               }
+       } else {
+               bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+                                                  GFP_KERNEL | __GFP_NOWARN);
+               if (!bo->vaddr) {
+                       dev_err(drm->dev,
+                               "failed to allocate buffer of size %zu\n",
+                               size);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+                                unsigned long flags)
+{
+       struct tegra_bo *bo;
+       int err;
+
+       bo = tegra_bo_alloc_object(drm, size);
+       if (IS_ERR(bo))
+               return bo;
+
+       err = tegra_bo_alloc(drm, bo, size);
+       if (err < 0)
+               goto release;
 
        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
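The __iommu_map_sg() helper above walks the scatterlist, mapping each entry at a monotonically increasing offset from the IOVA handed out by drm_mm, and the final offset becomes the mapping size recorded in bo->size. A standalone simulation of the walk (entry lengths and IOVA assumed):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t lengths[] = { 4096, 8192, 4096 };	/* per-entry lengths */
	unsigned long iova = 0x10000000;	/* as if from drm_mm_insert_node */
	size_t offset = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		printf("map [%#lx, %#lx)\n", iova + offset,
		       iova + offset + lengths[i]);
		offset += lengths[i];
	}

	printf("total mapped: %zu bytes\n", offset);
	return 0;
}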
@@ -134,69 +286,52 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
 
        return bo;
 
-err_mmap:
+release:
        drm_gem_object_release(&bo->gem);
-err_init:
-       tegra_bo_destroy(drm, bo);
-err_dma:
        kfree(bo);
-
        return ERR_PTR(err);
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
-                                            unsigned int size,
+                                            size_t size,
                                             unsigned long flags,
-                                            unsigned int *handle)
+                                            u32 *handle)
 {
        struct tegra_bo *bo;
-       int ret;
+       int err;
 
        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;
 
-       ret = drm_gem_handle_create(file, &bo->gem, handle);
-       if (ret)
-               goto err;
+       err = drm_gem_handle_create(file, &bo->gem, handle);
+       if (err) {
+               tegra_bo_free_object(&bo->gem);
+               return ERR_PTR(err);
+       }
 
        drm_gem_object_unreference_unlocked(&bo->gem);
 
        return bo;
-
-err:
-       tegra_bo_free_object(&bo->gem);
-       return ERR_PTR(ret);
 }
 
 static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
 {
+       struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
-       ssize_t size;
        int err;
 
-       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-       if (!bo)
-               return ERR_PTR(-ENOMEM);
-
-       host1x_bo_init(&bo->base, &tegra_bo_ops);
-       size = round_up(buf->size, PAGE_SIZE);
-
-       err = drm_gem_object_init(drm, &bo->gem, size);
-       if (err < 0)
-               goto free;
-
-       err = drm_gem_create_mmap_offset(&bo->gem);
-       if (err < 0)
-               goto release;
+       bo = tegra_bo_alloc_object(drm, buf->size);
+       if (IS_ERR(bo))
+               return bo;
 
        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
-               goto free_mmap;
+               goto free;
        }
 
        get_dma_buf(buf);
@@ -212,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                goto detach;
        }
 
-       if (bo->sgt->nents > 1) {
-               err = -EINVAL;
-               goto detach;
+       if (tegra->domain) {
+               err = tegra_bo_iommu_map(tegra, bo);
+               if (err < 0)
+                       goto detach;
+       } else {
+               if (bo->sgt->nents > 1) {
+                       err = -EINVAL;
+                       goto detach;
+               }
+
+               bo->paddr = sg_dma_address(bo->sgt->sgl);
        }
 
-       bo->paddr = sg_dma_address(bo->sgt->sgl);
        bo->gem.import_attach = attach;
 
        return bo;
@@ -228,47 +370,41 @@ detach:
 
        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
-free_mmap:
-       drm_gem_free_mmap_offset(&bo->gem);
-release:
-       drm_gem_object_release(&bo->gem);
 free:
+       drm_gem_object_release(&bo->gem);
        kfree(bo);
-
        return ERR_PTR(err);
 }
 
 void tegra_bo_free_object(struct drm_gem_object *gem)
 {
+       struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);
 
+       if (tegra->domain)
+               tegra_bo_iommu_unmap(tegra, bo);
+
        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
-               tegra_bo_destroy(gem->dev, bo);
+               tegra_bo_free(gem->dev, bo);
        }
 
-       drm_gem_free_mmap_offset(gem);
        drm_gem_object_release(gem);
-
        kfree(bo);
 }
 
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
 {
-       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;
 
-       min_pitch = round_up(min_pitch, tegra->pitch_align);
-       if (args->pitch < min_pitch)
-               args->pitch = min_pitch;
-
-       if (args->size < args->pitch * args->height)
-               args->size = args->pitch * args->height;
+       args->pitch = round_up(min_pitch, tegra->pitch_align);
+       args->size = args->pitch * args->height;
 
        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
@@ -279,7 +415,7 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 }
 
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
-                            uint32_t handle, uint64_t *offset)
+                            u32 handle, u64 *offset)
 {
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
@@ -304,7 +440,38 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
        return 0;
 }
 
+static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_gem_object *gem = vma->vm_private_data;
+       struct tegra_bo *bo = to_tegra_bo(gem);
+       struct page *page;
+       pgoff_t offset;
+       int err;
+
+       if (!bo->pages)
+               return VM_FAULT_SIGBUS;
+
+       offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+       page = bo->pages[offset];
+
+       err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+       switch (err) {
+       case -EAGAIN:
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+       case -EBUSY:
+               return VM_FAULT_NOPAGE;
+
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       }
+
+       return VM_FAULT_SIGBUS;
+}
+
 const struct vm_operations_struct tegra_bo_vm_ops = {
+       .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
 };
@@ -322,12 +489,30 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
        gem = vma->vm_private_data;
        bo = to_tegra_bo(gem);
 
-       ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
-                             vma->vm_end - vma->vm_start, vma->vm_page_prot);
-       if (ret)
-               drm_gem_vm_close(vma);
+       if (!bo->pages) {
+               unsigned long vm_pgoff = vma->vm_pgoff;
+
+               vma->vm_flags &= ~VM_PFNMAP;
+               vma->vm_pgoff = 0;
+
+               ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
+                                           bo->paddr, gem->size);
+               if (ret) {
+                       drm_gem_vm_close(vma);
+                       return ret;
+               }
+
+               vma->vm_pgoff = vm_pgoff;
+       } else {
+               pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+               vma->vm_flags |= VM_MIXEDMAP;
+               vma->vm_flags &= ~VM_PFNMAP;
 
-       return ret;
+               vma->vm_page_prot = pgprot_writecombine(prot);
+       }
+
+       return 0;
 }
 
 static struct sg_table *
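The two mmap() paths above are exercised from userspace through the standard dumb-buffer ioctls; IOMMU-backed buffers are faulted in page by page through tegra_bo_fault(), while contiguous buffers are remapped in one go. A hedged userspace sketch (device node assumed, error handling omitted):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *ptr;

	memset(&create, 0, sizeof(create));
	create.width = 1920;
	create.height = 1080;
	create.bpp = 32;
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	memset(ptr, 0, create.size);	/* faults in pages one at a time if
					 * the buffer is IOMMU-backed */
	return 0;
}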
@@ -342,21 +527,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        if (!sgt)
                return NULL;
 
-       if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
-               kfree(sgt);
-               return NULL;
-       }
+       if (bo->pages) {
+               struct scatterlist *sg;
+               unsigned int i;
 
-       sg_dma_address(sgt->sgl) = bo->paddr;
-       sg_dma_len(sgt->sgl) = gem->size;
+               if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
+                       goto free;
+
+               for_each_sg(sgt->sgl, sg, bo->num_pages, i)
+                       sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
+
+               if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+                       goto free;
+       } else {
+               if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+                       goto free;
+
+               sg_dma_address(sgt->sgl) = bo->paddr;
+               sg_dma_len(sgt->sgl) = gem->size;
+       }
 
        return sgt;
+
+free:
+       sg_free_table(sgt);
+       kfree(sgt);
+       return NULL;
 }
 
 static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
 {
+       struct drm_gem_object *gem = attach->dmabuf->priv;
+       struct tegra_bo *bo = to_tegra_bo(gem);
+
+       if (bo->pages)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
        sg_free_table(sgt);
        kfree(sgt);
 }
index 6538b56780c2a916e817d44080e952c37bba8a89..6c5f12ac0087a0b8baf538f435d244f2ffa1537b 100644 (file)
@@ -38,6 +38,12 @@ struct tegra_bo {
        dma_addr_t paddr;
        void *vaddr;
 
+       struct drm_mm_node *mm;
+       unsigned long num_pages;
+       struct page **pages;
+       /* size of IOMMU mapping */
+       size_t size;
+
        struct tegra_bo_tiling tiling;
 };
 
@@ -46,18 +52,18 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
        return container_of(gem, struct tegra_bo, gem);
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
-                                            unsigned int size,
+                                            size_t size,
                                             unsigned long flags,
-                                            unsigned int *handle);
+                                            u32 *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args);
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
-                            uint32_t handle, uint64_t *offset);
+                            u32 handle, u64 *offset);
 
 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
index 0c67d7eebc94876d645fe0827485bdc13a998035..6a5c7b81fbc5d6453c7b96ab57339400504a913c 100644 (file)
@@ -157,22 +157,18 @@ static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
 
 static void tegra_encoder_prepare(struct drm_encoder *encoder)
 {
+       tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
 static void tegra_encoder_commit(struct drm_encoder *encoder)
 {
+       tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
 static void tegra_encoder_mode_set(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted)
 {
-       struct tegra_output *output = encoder_to_output(encoder);
-       int err;
-
-       err = tegra_output_enable(output);
-       if (err < 0)
-               dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
 }
 
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -187,7 +183,8 @@ static irqreturn_t hpd_irq(int irq, void *data)
 {
        struct tegra_output *output = data;
 
-       drm_helper_hpd_irq_event(output->connector.dev);
+       if (output->connector.dev)
+               drm_helper_hpd_irq_event(output->connector.dev);
 
        return IRQ_HANDLED;
 }
@@ -259,6 +256,13 @@ int tegra_output_probe(struct tegra_output *output)
                }
 
                output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+               /*
+                * Disable the interrupt until the connector has been
+                * initialized to avoid a race in the hotplug interrupt
+                * handler.
+                */
+               disable_irq(output->hpd_irq);
        }
 
        return 0;
@@ -324,10 +328,27 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
 
        output->encoder.possible_crtcs = 0x3;
 
+       /*
+        * The connector is now registered and ready to receive hotplug events
+        * so the hotplug interrupt can be enabled.
+        */
+       if (gpio_is_valid(output->hpd_gpio))
+               enable_irq(output->hpd_irq);
+
        return 0;
 }
 
 int tegra_output_exit(struct tegra_output *output)
 {
+       /*
+        * The connector is going away, so the interrupt must be disabled to
+        * prevent the hotplug interrupt handler from potentially crashing.
+        */
+       if (gpio_is_valid(output->hpd_gpio))
+               disable_irq(output->hpd_irq);
+
+       if (output->panel)
+               drm_panel_detach(output->panel);
+
        return 0;
 }
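
The three hunks above close a window where hpd_irq() could run against a connector that does not exist yet, or no longer exists: the interrupt is requested in tegra_output_probe() and immediately masked, unmasked only once tegra_output_init() has registered the connector, and masked again before teardown. A minimal sketch of the probe-side ordering (flag set and helper name assumed):

    static int example_hpd_request(struct tegra_output *output)
    {
            int err;

            err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
                                       IRQF_TRIGGER_RISING |
                                       IRQF_TRIGGER_FALLING |
                                       IRQF_ONESHOT, "hpd", output);
            if (err < 0)
                    return err;

            /* Keep the line masked; enable_irq() in tegra_output_init()
             * and disable_irq() in tegra_output_exit() bracket the
             * connector's lifetime. */
            disable_irq(output->hpd_irq);

            return 0;
    }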
index d642d4a0213491d099d5a7a4184fc273dc789a8f..c73588483be02e0ba97e52017ff972e59841c4d7 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include "drm_flip_work.h"
+#include <drm/drm_plane_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -664,12 +665,8 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
        tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
        init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
 
-       ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+       drm_flip_work_init(&tilcdc_crtc->unref_work,
                        "unref", unref_worker);
-       if (ret) {
-               dev_err(dev->dev, "could not allocate unref FIFO\n");
-               goto fail;
-       }
 
        ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
        if (ret < 0)
index 79a34cbd29f5260df24da1ae86d460e1a0df3c6f..d56d3f8b8d6bc676a4e770b3e2279812b56d0229 100644 (file)
@@ -58,8 +58,7 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
 static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
-       if (priv->fbdev)
-               drm_fbdev_cma_hotplug_event(priv->fbdev);
+       drm_fbdev_cma_hotplug_event(priv->fbdev);
 }
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
index 964387fc5c8f860544704ae3ce21d50084448361..aa0bd054d3e95c148f63072a39f82059ba2b366c 100644 (file)
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
        struct drm_mm *mm = &rman->mm;
        struct drm_mm_node *node = NULL;
+       enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
        unsigned long lpfn;
        int ret;
@@ -67,15 +68,16 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        if (!node)
                return -ENOMEM;
 
-       if (place->flags & TTM_PL_FLAG_TOPDOWN)
+       if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+               sflags = DRM_MM_SEARCH_BELOW;
                aflags = DRM_MM_CREATE_TOP;
+       }
 
        spin_lock(&rman->lock);
        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
                                          mem->page_alignment, 0,
                                          place->fpfn, lpfn,
-                                         DRM_MM_SEARCH_BEST,
-                                         aflags);
+                                         sflags, aflags);
        spin_unlock(&rman->lock);
 
        if (unlikely(ret)) {
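
The fix pairs the search heuristic with the allocator flag: DRM_MM_CREATE_TOP alone placed the node at the top of whatever hole was found, but the hole search itself still scanned bottom-up, so TTM_PL_FLAG_TOPDOWN placements could end up low anyway. With DRM_MM_SEARCH_BELOW the scan is biased toward high addresses as well. The resulting call shape, as a hedged wrapper (names assumed):

    static int insert_topdown(struct drm_mm *mm, struct drm_mm_node *node,
                              unsigned long num_pages, unsigned alignment,
                              unsigned long fpfn, unsigned long lpfn)
    {
            /* Both flags must agree for a true top-down placement. */
            return drm_mm_insert_node_in_range_generic(mm, node, num_pages,
                                                       alignment, 0,
                                                       fpfn, lpfn,
                                                       DRM_MM_SEARCH_BELOW,
                                                       DRM_MM_CREATE_TOP);
    }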
index 8ce508e76208a0f5ce8c12b1da9e54111aac7ad6..3820ae97a03039cf0d95ee3b473642b589262299 100644 (file)
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                          struct list_head *list, bool intr)
+                          struct list_head *list, bool intr,
+                          struct list_head *dups)
 {
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        __ttm_bo_unreserve(bo);
 
                        ret = -EBUSY;
+
+               } else if (ret == -EALREADY && dups) {
+                       struct ttm_validate_buffer *safe = entry;
+                       entry = list_prev_entry(entry, head);
+                       list_del(&safe->head);
+                       list_add(&safe->head, dups);
+                       continue;
                }
 
                if (!ret) {
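
ttm_eu_reserve_buffers() also grows a dups list here: when two validation entries name the same BO, the second reservation returns -EALREADY and the entry is moved onto dups instead of aborting the whole reservation. Callers that cannot generate duplicates pass NULL, as the vmwgfx hunks further down do. A sketch of a caller that does expect duplicates (names assumed, duplicate handling elided):

    static int reserve_with_dups(struct list_head *validate_list)
    {
            struct ww_acquire_ctx ticket;
            struct list_head dups;
            int ret;

            INIT_LIST_HEAD(&dups);

            ret = ttm_eu_reserve_buffers(&ticket, validate_list, true,
                                         &dups);
            if (ret)
                    return ret;

            /* Entries on dups alias buffers already reserved through
             * validate_list; process them without reserving again. */

            return 0;
    }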
index 09874d695188067dca03540efd5b2f135a75b913..025c429050c06c4079f5b471128e499463abc753 100644 (file)
@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
  **/
 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
-                             gfp_t gfp)
+                             bool use_static)
 {
+       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;
 
-       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+       if (use_static)
+               pages_to_free = static_buf;
+       else
+               pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+                                       GFP_KERNEL);
        if (!pages_to_free) {
                pr_err("Failed to allocate memory for pool free operation\n");
                return 0;
@@ -374,7 +379,8 @@ restart:
        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
 out:
-       kfree(pages_to_free);
+       if (pages_to_free != static_buf)
+               kfree(pages_to_free);
        return nr_free;
 }
 
@@ -383,8 +389,6 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * We need to pass sc->gfp_mask to ttm_page_pool_free().
- *
  * This code is crying out for a shrinker per pool....
  */
 static unsigned long
@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-               shrink_pages = ttm_page_pool_free(pool, nr_free,
-                                                 sc->gfp_mask);
+               /* OK to use static buffer since global mutex is held. */
+               shrink_pages = ttm_page_pool_free(pool, nr_free, true);
                freed += nr_free - shrink_pages;
        }
        mutex_unlock(&lock);
@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (npages)
-               ttm_page_pool_free(pool, npages, GFP_KERNEL);
+               ttm_page_pool_free(pool, npages, false);
 }
 
 /*
@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);
 
+       /* OK to use static buffer since global mutex is no longer used. */
        for (i = 0; i < NUM_POOLS; ++i)
-               ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
-                                  GFP_KERNEL);
+               ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
 
        kobject_put(&_manager->kobj);
        _manager = NULL;
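
Passing sc->gfp_mask into kmalloc() from the shrinker was the deadlock the removed XXX comment warned about: reclaim re-entering allocation. The rework avoids allocating on reclaim paths altogether by lending out a static scratch array, which is safe only because every use_static caller serializes on the same global mutex. The pattern, reduced to its essentials (all names assumed):

    #include <linux/slab.h>

    #define SCRATCH_ENTRIES 64

    struct example_pool;

    /* use_static == true requires the caller to hold the single global
     * pool mutex, so the static array has at most one user at a time. */
    static int drain_pool(struct example_pool *pool, unsigned int nr,
                          bool use_static)
    {
            static struct page *static_buf[SCRATCH_ENTRIES];
            struct page **scratch;

            if (use_static)
                    scratch = static_buf;
            else
                    scratch = kmalloc(SCRATCH_ENTRIES * sizeof(*scratch),
                                      GFP_KERNEL);
            if (!scratch)
                    return -ENOMEM;

            /* ... gather up to SCRATCH_ENTRIES pages from pool into
             * scratch and release them, as ttm_page_pool_free() does ... */

            if (scratch != static_buf)
                    kfree(scratch);

            return 0;
    }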
index c96db433f8af834398f6496da9498db98013a7c7..01e1d27eb078396cd97dc2f3a51cbda8dec95a9f 100644 (file)
@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
  **/
 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
-                                      gfp_t gfp)
+                                      bool use_static)
 {
+       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
        unsigned long irq_flags;
        struct dma_page *dma_p, *tmp;
        struct page **pages_to_free;
@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
                         npages_to_free, nr_free);
        }
 #endif
-       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+       if (use_static)
+               pages_to_free = static_buf;
+       else
+               pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+                                       GFP_KERNEL);
 
        if (!pages_to_free) {
                pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -502,7 +507,8 @@ restart:
        if (freed_pages)
                ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
 out:
-       kfree(pages_to_free);
+       if (pages_to_free != static_buf)
+               kfree(pages_to_free);
        return nr_free;
 }
 
@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
                if (pool->type != type)
                        continue;
                /* Takes a spinlock.. */
-               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+               /* OK to use static buffer since global mutex is held. */
+               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
                WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
                /* This code path is called after _all_ references to the
                 * struct device has been dropped - so nobody should be
@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
        /* shrink pool if necessary (only on !is_cached pools)*/
        if (npages)
-               ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+               ttm_dma_page_pool_free(pool, npages, false);
        ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
- *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
  */
@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                if (++idx < pool_offset)
                        continue;
                nr_free = shrink_pages;
-               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
-                                                     sc->gfp_mask);
+               /* OK to use static buffer since global mutex is held. */
+               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
                freed += nr_free - shrink_pages;
 
                pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
index 05c7481bfd40d3ff8b49e7ffd6676d5a37eae600..195bcac0b6c8d9e429964d5e942ad1b7a2d9d74d 100644 (file)
@@ -1,6 +1,6 @@
 
 ccflags-y := -Iinclude/drm
 
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
 
 obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644 (file)
index 0000000..ac8a66b
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * udl_dmabuf.c
+ *
+ * Copyright (c) 2014 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_drm_dmabuf_attachment {
+       struct sg_table sgt;
+       enum dma_data_direction dir;
+       bool is_mapped;
+};
+
+static int udl_attach_dma_buf(struct dma_buf *dmabuf,
+                             struct device *dev,
+                             struct dma_buf_attachment *attach)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+                       attach->dmabuf->size);
+
+       udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
+       if (!udl_attach)
+               return -ENOMEM;
+
+       udl_attach->dir = DMA_NONE;
+       attach->priv = udl_attach;
+
+       return 0;
+}
+
+static void udl_detach_dma_buf(struct dma_buf *dmabuf,
+                              struct dma_buf_attachment *attach)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+       struct sg_table *sgt;
+
+       if (!udl_attach)
+               return;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+                       attach->dmabuf->size);
+
+       sgt = &udl_attach->sgt;
+
+       if (udl_attach->dir != DMA_NONE)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                               udl_attach->dir);
+
+       sg_free_table(sgt);
+       kfree(udl_attach);
+       attach->priv = NULL;
+}
+
+static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
+                                       enum dma_data_direction dir)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+       struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
+       struct drm_device *dev = obj->base.dev;
+       struct scatterlist *rd, *wr;
+       struct sg_table *sgt = NULL;
+       unsigned int i;
+       int page_count;
+       int nents, ret;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
+                       attach->dmabuf->size, dir);
+
+       /* just return current sgt if already requested. */
+       if (udl_attach->dir == dir && udl_attach->is_mapped)
+               return &udl_attach->sgt;
+
+       if (!obj->pages) {
+               ret = udl_gem_get_pages(obj);
+               if (ret) {
+                       DRM_ERROR("failed to map pages.\n");
+                       return ERR_PTR(ret);
+               }
+       }
+
+       page_count = obj->base.size / PAGE_SIZE;
+       obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
+       if (IS_ERR(obj->sg)) {
+               DRM_ERROR("failed to allocate sgt.\n");
+               return ERR_CAST(obj->sg);
+       }
+
+       sgt = &udl_attach->sgt;
+
+       ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
+       if (ret) {
+               DRM_ERROR("failed to alloc sgt.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       rd = obj->sg->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
+
+       if (dir != DMA_NONE) {
+               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+               if (!nents) {
+                       DRM_ERROR("failed to map sgl with iommu.\n");
+                       sg_free_table(sgt);
+                       sgt = ERR_PTR(-EIO);
+                       goto err_unlock;
+               }
+       }
+
+       udl_attach->is_mapped = true;
+       udl_attach->dir = dir;
+       attach->priv = udl_attach;
+
+err_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return sgt;
+}
+
+static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
+                             struct sg_table *sgt,
+                             enum dma_data_direction dir)
+{
+       /* Nothing to do. */
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
+                       attach->dmabuf->size, dir);
+}
+
+static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+       /* TODO */
+
+       return NULL;
+}
+
+static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+                                   unsigned long page_num)
+{
+       /* TODO */
+
+       return NULL;
+}
+
+static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
+                             unsigned long page_num, void *addr)
+{
+       /* TODO */
+}
+
+static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+                                    unsigned long page_num,
+                                    void *addr)
+{
+       /* TODO */
+}
+
+static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
+                          struct vm_area_struct *vma)
+{
+       /* TODO */
+
+       return -EINVAL;
+}
+
+static struct dma_buf_ops udl_dmabuf_ops = {
+       .attach                 = udl_attach_dma_buf,
+       .detach                 = udl_detach_dma_buf,
+       .map_dma_buf            = udl_map_dma_buf,
+       .unmap_dma_buf          = udl_unmap_dma_buf,
+       .kmap                   = udl_dmabuf_kmap,
+       .kmap_atomic            = udl_dmabuf_kmap_atomic,
+       .kunmap                 = udl_dmabuf_kunmap,
+       .kunmap_atomic          = udl_dmabuf_kunmap_atomic,
+       .mmap                   = udl_dmabuf_mmap,
+       .release                = drm_gem_dmabuf_release,
+};
+
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+                                    struct drm_gem_object *obj, int flags)
+{
+       return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+}
+
+static int udl_prime_create(struct drm_device *dev,
+                           size_t size,
+                           struct sg_table *sg,
+                           struct udl_gem_object **obj_p)
+{
+       struct udl_gem_object *obj;
+       int npages;
+
+       npages = size / PAGE_SIZE;
+
+       *obj_p = NULL;
+       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+       if (!obj)
+               return -ENOMEM;
+
+       obj->sg = sg;
+       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (obj->pages == NULL) {
+               DRM_ERROR("obj pages is NULL %d\n", npages);
+               return -ENOMEM;
+       }
+
+       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+       *obj_p = obj;
+       return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+                               struct dma_buf *dma_buf)
+{
+       struct dma_buf_attachment *attach;
+       struct sg_table *sg;
+       struct udl_gem_object *uobj;
+       int ret;
+
+       /* need to attach */
+       get_device(dev->dev);
+       attach = dma_buf_attach(dma_buf, dev->dev);
+       if (IS_ERR(attach)) {
+               put_device(dev->dev);
+               return ERR_CAST(attach);
+       }
+
+       get_dma_buf(dma_buf);
+
+       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto fail_detach;
+       }
+
+       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+       if (ret)
+               goto fail_unmap;
+
+       uobj->base.import_attach = attach;
+       uobj->flags = UDL_BO_WC;
+
+       return &uobj->base;
+
+fail_unmap:
+       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+       dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+       put_device(dev->dev);
+       return ERR_PTR(ret);
+}
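
With udl_gem_prime_export() wired up (see the udl_drv.c hunk below), a UDL GEM handle can be handed to other devices as a dma-buf fd. A hedged userspace sketch using libdrm's PRIME wrapper (drm_fd and handle assumed to come from earlier setup):

    #include <stdint.h>
    #include <xf86drm.h>

    /* Returns a dma-buf fd for the GEM handle, or -1 on error. */
    static int export_bo(int drm_fd, uint32_t handle)
    {
            int prime_fd = -1;

            if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd))
                    return -1;

            return prime_fd;
    }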
index 8607e9e513db0d94129ea524b9a4abe5dd9653af..d5728ec8525443246aec49b508db2827375242bd 100644 (file)
@@ -51,7 +51,9 @@ static struct drm_driver driver = {
        .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &udl_driver_fops,
 
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_export = udl_gem_prime_export,
        .gem_prime_import = udl_gem_prime_import,
 
        .name = DRIVER_NAME,
index c7490a2489a750c2bc884761300b6b324c5b572a..80adbac82bdedcfb99db9164575ffa6b8787a9f8 100644 (file)
@@ -25,6 +25,9 @@
 #define DRIVER_MINOR           0
 #define DRIVER_PATCHLEVEL      1
 
+#define UDL_BO_CACHEABLE               (1 << 0)
+#define UDL_BO_WC              (1 << 1)
+
 struct udl_device;
 
 struct urb_node {
@@ -69,6 +72,7 @@ struct udl_gem_object {
        struct page **pages;
        void *vmapping;
        struct sg_table *sg;
+       unsigned int flags;
 };
 
 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -120,9 +124,13 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                                            size_t size);
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+                                    struct drm_gem_object *obj, int flags);
 struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf);
 
+int udl_gem_get_pages(struct udl_gem_object *obj);
+void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
index 8044f5fb7c49a1f5709c6e1d62be72a798d465b8..2a0a784ab6eec37c34143f250fae9851f5d3ea94 100644 (file)
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       obj->flags = UDL_BO_CACHEABLE;
        return obj;
 }
 
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
        return 0;
 }
 
+static void update_vm_cache_attr(struct udl_gem_object *obj,
+                                struct vm_area_struct *vma)
+{
+       DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+       /* non-cacheable as default. */
+       if (obj->flags & UDL_BO_CACHEABLE) {
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       } else if (obj->flags & UDL_BO_WC) {
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       } else {
+               vma->vm_page_prot =
+                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+       }
+}
+
 int udl_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
 
+       update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+
        return ret;
 }
 
@@ -107,7 +127,7 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj)
+int udl_gem_get_pages(struct udl_gem_object *obj)
 {
        struct page **pages;
 
@@ -123,7 +143,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj)
        return 0;
 }
 
-static void udl_gem_put_pages(struct udl_gem_object *obj)
+void udl_gem_put_pages(struct udl_gem_object *obj)
 {
        if (obj->base.import_attach) {
                drm_free_large(obj->pages);
@@ -164,8 +184,7 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
                return;
        }
 
-       if (obj->vmapping)
-               vunmap(obj->vmapping);
+       vunmap(obj->vmapping);
 
        udl_gem_put_pages(obj);
 }
@@ -220,73 +239,3 @@ unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
-
-static int udl_prime_create(struct drm_device *dev,
-                           size_t size,
-                           struct sg_table *sg,
-                           struct udl_gem_object **obj_p)
-{
-       struct udl_gem_object *obj;
-       int npages;
-
-       npages = size / PAGE_SIZE;
-
-       *obj_p = NULL;
-       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-       if (!obj)
-               return -ENOMEM;
-
-       obj->sg = sg;
-       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
-       if (obj->pages == NULL) {
-               DRM_ERROR("obj pages is NULL %d\n", npages);
-               return -ENOMEM;
-       }
-
-       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-       *obj_p = obj;
-       return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-                               struct dma_buf *dma_buf)
-{
-       struct dma_buf_attachment *attach;
-       struct sg_table *sg;
-       struct udl_gem_object *uobj;
-       int ret;
-
-       /* need to attach */
-       get_device(dev->dev);
-       attach = dma_buf_attach(dma_buf, dev->dev);
-       if (IS_ERR(attach)) {
-               put_device(dev->dev);
-               return ERR_CAST(attach);
-       }
-
-       get_dma_buf(dma_buf);
-
-       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-       if (IS_ERR(sg)) {
-               ret = PTR_ERR(sg);
-               goto fail_detach;
-       }
-
-       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-       if (ret) {
-               goto fail_unmap;
-       }
-
-       uobj->base.import_attach = attach;
-
-       return &uobj->base;
-
-fail_unmap:
-       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-       dma_buf_detach(dma_buf, attach);
-       dma_buf_put(dma_buf);
-       put_device(dev->dev);
-       return ERR_PTR(ret);
-}
index dc145d320b25abe3dab04ef9e60f75d4eddaa9ae..1701f1dfb23f50044145fceeb1d94d8caed7bbd7 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "udl_drv.h"
 
 /*
index 25f3c250fd98635ff50a7249f40e691686519674..7b5d22110f25e7619c37eac7fd66858fa25b93e8 100644 (file)
@@ -889,8 +889,7 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
-       if (dev_priv->ctx.cmd_bounce)
-               vfree(dev_priv->ctx.cmd_bounce);
+       vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
@@ -1063,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 
        vmaster = vmw_master_check(dev, file_priv, flags);
        if (unlikely(IS_ERR(vmaster))) {
-               DRM_INFO("IOCTL ERROR %d\n", nr);
-               return PTR_ERR(vmaster);
+               ret = PTR_ERR(vmaster);
+
+               if (ret != -ERESTARTSYS)
+                       DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+                                nr, ret);
+               return ret;
        }
 
        ret = ioctl_func(filp, cmd, arg);
index 596cd6dafd338c2f2e21a6461eb9c93676c487b3..33176d05db3542903f1c919b59f68cad2da22044 100644 (file)
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err_nores;
 
-       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+                                    true, NULL);
        if (unlikely(ret != 0))
                goto out_err;
 
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        query_val.shared = false;
        list_add_tail(&query_val.head, &validate_list);
 
-       ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+       ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+                                    false, NULL);
        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
index 197164fd7803730c1759728ae9d53fa586f739da..b7594cb758afc4299493122f4f6e44c68ef4d919 100644 (file)
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-       struct vmw_fence_manager *fman = fman_from_fence(fence);
-
        fence_free(&fence->base);
-
-       /*
-        * Free kernel space accounting.
-        */
-       ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-                           fman->fence_size);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
 {
-       struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;
 
-       ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-                                  false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-       if (unlikely(fence == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_object;
-       }
+       if (unlikely(fence == NULL))
+               return -ENOMEM;
 
        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 
 out_err_init:
        kfree(fence);
-out_no_object:
-       ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
 }
 
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
        if (ret != 0)
                goto out_no_queue;
 
+       return 0;
+
 out_no_queue:
        event->base.destroy(&event->base);
 out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
        BUG_ON(fence == NULL);
 
-       if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-       else
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-
+       ret = vmw_event_fence_action_create(file_priv, fence,
+                                           arg->flags,
+                                           arg->user_data,
+                                           true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
index 941a7bc0b79190b7bcafe751cd7cac26b371911e..3725b521d9319c9b952bf3920bfcd3a27c61dac3 100644 (file)
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
        ret = 0;
 out:
        drm_modeset_unlock_all(dev_priv->dev);
-       drm_modeset_lock_crtc(crtc);
+       drm_modeset_lock_crtc(crtc, crtc->cursor);
 
        return ret;
 }
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
                                   du->cursor_y + du->hotspot_y);
 
        drm_modeset_unlock_all(dev_priv->dev);
-       drm_modeset_lock_crtc(crtc);
+       drm_modeset_lock_crtc(crtc, crtc->cursor);
 
        return 0;
 }
index 15e185ae4c990788af33b20b5a63042638f67ef2..5c289f748ab49e477e564e175296a1bd2f6c709e 100644 (file)
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_ldu(x) \
index 026de7cea0f662bd75e92cf4ad1dfde057c7775e..210ef15b1d0919c59e8d7eb8aff72b588f2f6ea6 100644 (file)
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
index b295463a60b3828686fb49b58d07b011ec38bdbe..7dc591d04d9a8ba16c5f0f5df472e82d99646db4 100644 (file)
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_sou(x) \
index 8719fb3cccc93fb8b282c5022f9cdd669dc3e270..6a4584a43aa6cec2d29d35471e6c25559897b3a6 100644 (file)
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
-       cmd->body.offsetInBytes = 0;
+       cmd->body.offsetInBytes = res->backup_offset;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
index 3995255b16c753731420cb8283992c17c33ddf2e..5a8c8d55317ab4f622c5754f509d5d3660a3e47c 100644 (file)
@@ -97,7 +97,7 @@ fail:
 static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 {
        u32 pos = pb->pos;
-       u32 *p = (u32 *)((u32)pb->mapped + pos);
+       u32 *p = (u32 *)((void *)pb->mapped + pos);
        WARN_ON(pos == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
index 313c4b7843483ab4d0b6d31c1d76d754e2320b38..470087af8fe520681ba3cc8457646343f16f6aee 100644 (file)
@@ -42,7 +42,7 @@ struct host1x_job;
  */
 
 struct push_buffer {
-       u32 *mapped;                    /* mapped pushbuffer memory */
+       void *mapped;                   /* mapped pushbuffer memory */
        dma_addr_t phys;                /* physical address of pushbuffer */
        u32 fence;                      /* index we've written */
        u32 pos;                        /* index to write to */
index 6b09b71940c2d5df0251ec2b3cd417ec2eb93e2d..305ea8f3382d22da2611e6de7025695265d97699 100644 (file)
 #include "../debug.h"
 
 /*
- * Put the restart at the end of pushbuffer memor
+ * Put the restart at the end of pushbuffer memory
  */
 static void push_buffer_init(struct push_buffer *pb)
 {
-       *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+       *(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
 }
 
 /*
@@ -51,11 +51,11 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 
        /* NOP all the PB slots */
        while (nr_slots--) {
-               u32 *p = (u32 *)((u32)pb->mapped + getptr);
+               u32 *p = (u32 *)(pb->mapped + getptr);
                *(p++) = HOST1X_OPCODE_NOP;
                *(p++) = HOST1X_OPCODE_NOP;
-               dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
-                       (u64)pb->phys + getptr);
+               dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
+                       &pb->phys, getptr);
                getptr = (getptr + 8) & (pb->size_bytes - 1);
        }
        wmb();
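
The dev_dbg() change is more than cosmetic: casting a dma_addr_t to u64 (or to u32, as the old pushbuffer code did) breaks on configurations where the widths differ. The %pad specifier takes a pointer to the dma_addr_t and prints it at its native width. Illustration with assumed names:

    static void log_nop_slot(struct device *dev, dma_addr_t base, u32 off)
    {
            /* %pad dereferences its argument: pass &base, not base. */
            dev_dbg(dev, "%s: NOP at %pad+%#x\n", __func__, &base, off);
    }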
index 4608257ab65641c488f627a0789eae9a4ba90ff1..946c332c3906ad823826b56b0c25a3300715424c 100644 (file)
@@ -32,6 +32,7 @@
 static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
                               u32 offset, u32 words)
 {
+       struct device *dev = cdma_to_channel(cdma)->dev;
        void *mem = NULL;
 
        if (host1x_debug_trace_cmdbuf)
@@ -44,11 +45,14 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
                 * of how much you can output to ftrace at once.
                 */
                for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
-                       trace_host1x_cdma_push_gather(
-                               dev_name(cdma_to_channel(cdma)->dev),
-                               (u32)bo, min(words - i, TRACE_MAX_LENGTH),
-                               offset + i * sizeof(u32), mem);
+                       u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+                       offset += i * sizeof(u32);
+
+                       trace_host1x_cdma_push_gather(dev_name(dev), bo,
+                                                     num_words, offset,
+                                                     mem);
                }
+
                host1x_bo_munmap(bo, mem);
        }
 }
index f72c873eff819f831202a742cf02ec70e94753f2..791de9351eebff26946919c6b7b3e747489d2579 100644 (file)
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
                                continue;
                        }
 
-                       host1x_debug_output(o, "    GATHER at %#llx+%04x, %d words\n",
-                                           (u64)g->base, g->offset, g->words);
+                       host1x_debug_output(o, "    GATHER at %pad+%#x, %d words\n",
+                                           &g->base, g->offset, g->words);
 
                        show_gather(o, g->base + g->offset, g->words, cdma,
                                    g->base, mapped);
index 33a697d6dcefea290c3bac2981e2399e27f99a14..8b3c15df066085e88a33d6efddc74cfd774bc04d 100644 (file)
@@ -23,7 +23,7 @@ struct host1x_job_gather {
        u32 words;
        dma_addr_t base;
        struct host1x_bo *bo;
-       int offset;
+       u32 offset;
        bool handled;
 };
 
index 9882ea122024e8f56c47e7b7b13f75b3a7f330aa..fbc6ee6ca3374276219c5a8ae5beab08f8149005 100644 (file)
 #define MIPI_CAL_CONFIG_DSIC           0x10
 #define MIPI_CAL_CONFIG_DSID           0x11
 
+#define MIPI_CAL_CONFIG_DSIAB_CLK      0x19
+#define MIPI_CAL_CONFIG_DSICD_CLK      0x1a
+#define MIPI_CAL_CONFIG_CSIAB_CLK      0x1b
+#define MIPI_CAL_CONFIG_CSICD_CLK      0x1c
+#define MIPI_CAL_CONFIG_CSIE_CLK       0x1d
+
+/* for data and clock lanes */
 #define MIPI_CAL_CONFIG_SELECT         (1 << 21)
+
+/* for data lanes */
 #define MIPI_CAL_CONFIG_HSPDOS(x)      (((x) & 0x1f) << 16)
 #define MIPI_CAL_CONFIG_HSPUOS(x)      (((x) & 0x1f) <<  8)
 #define MIPI_CAL_CONFIG_TERMOS(x)      (((x) & 0x1f) <<  0)
 
+/* for clock lanes */
+#define MIPI_CAL_CONFIG_HSCLKPDOSD(x)  (((x) & 0x1f) <<  8)
+#define MIPI_CAL_CONFIG_HSCLKPUOSD(x)  (((x) & 0x1f) <<  0)
+
 #define MIPI_CAL_BIAS_PAD_CFG0         0x16
 #define MIPI_CAL_BIAS_PAD_PDVCLAMP     (1 << 1)
 #define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
 
 #define MIPI_CAL_BIAS_PAD_CFG1         0x17
+#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
 
 #define MIPI_CAL_BIAS_PAD_CFG2         0x18
 #define MIPI_CAL_BIAS_PAD_PDVREG       (1 << 1)
 
-static const struct module {
-       unsigned long reg;
-} modules[] = {
-       { .reg = MIPI_CAL_CONFIG_CSIA },
-       { .reg = MIPI_CAL_CONFIG_CSIB },
-       { .reg = MIPI_CAL_CONFIG_CSIC },
-       { .reg = MIPI_CAL_CONFIG_CSID },
-       { .reg = MIPI_CAL_CONFIG_CSIE },
-       { .reg = MIPI_CAL_CONFIG_DSIA },
-       { .reg = MIPI_CAL_CONFIG_DSIB },
-       { .reg = MIPI_CAL_CONFIG_DSIC },
-       { .reg = MIPI_CAL_CONFIG_DSID },
+struct tegra_mipi_pad {
+       unsigned long data;
+       unsigned long clk;
+};
+
+struct tegra_mipi_soc {
+       bool has_clk_lane;
+       const struct tegra_mipi_pad *pads;
+       unsigned int num_pads;
 };
 
 struct tegra_mipi {
+       const struct tegra_mipi_soc *soc;
        void __iomem *regs;
        struct mutex lock;
        struct clk *clk;
@@ -90,16 +102,16 @@ struct tegra_mipi_device {
        unsigned long pads;
 };
 
-static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
-                                            unsigned long reg)
+static inline u32 tegra_mipi_readl(struct tegra_mipi *mipi,
+                                  unsigned long offset)
 {
-       return readl(mipi->regs + (reg << 2));
+       return readl(mipi->regs + (offset << 2));
 }
 
-static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
-                                    unsigned long value, unsigned long reg)
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
+                                    unsigned long offset)
 {
-       writel(value, mipi->regs + (reg << 2));
+       writel(value, mipi->regs + (offset << 2));
 }
 
 struct tegra_mipi_device *tegra_mipi_request(struct device *device)
@@ -117,36 +129,35 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
-               of_node_put(args.np);
                err = -ENOMEM;
                goto out;
        }
 
        dev->pdev = of_find_device_by_node(args.np);
        if (!dev->pdev) {
-               of_node_put(args.np);
                err = -ENODEV;
                goto free;
        }
 
-       of_node_put(args.np);
-
        dev->mipi = platform_get_drvdata(dev->pdev);
        if (!dev->mipi) {
                err = -EPROBE_DEFER;
-               goto pdev_put;
+               goto put;
        }
 
+       of_node_put(args.np);
+
        dev->pads = args.args[0];
        dev->device = device;
 
        return dev;
 
-pdev_put:
+put:
        platform_device_put(dev->pdev);
 free:
        kfree(dev);
 out:
+       of_node_put(args.np);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL(tegra_mipi_request);
@@ -161,7 +172,7 @@ EXPORT_SYMBOL(tegra_mipi_free);
 static int tegra_mipi_wait(struct tegra_mipi *mipi)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(250);
-       unsigned long value;
+       u32 value;
 
        while (time_before(jiffies, timeout)) {
                value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
@@ -177,8 +188,9 @@ static int tegra_mipi_wait(struct tegra_mipi *mipi)
 
 int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 {
-       unsigned long value;
+       const struct tegra_mipi_soc *soc = device->mipi->soc;
        unsigned int i;
+       u32 value;
        int err;
 
        err = clk_enable(device->mipi->clk);
@@ -192,23 +204,35 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
        value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
        tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
 
+       tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
+                         MIPI_CAL_BIAS_PAD_CFG1);
+
        value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
        value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
        tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
 
-       for (i = 0; i < ARRAY_SIZE(modules); i++) {
-               if (device->pads & BIT(i))
-                       value = MIPI_CAL_CONFIG_SELECT |
-                               MIPI_CAL_CONFIG_HSPDOS(0) |
-                               MIPI_CAL_CONFIG_HSPUOS(4) |
-                               MIPI_CAL_CONFIG_TERMOS(5);
-               else
-                       value = 0;
+       for (i = 0; i < soc->num_pads; i++) {
+               u32 clk = 0, data = 0;
+
+               if (device->pads & BIT(i)) {
+                       data = MIPI_CAL_CONFIG_SELECT |
+                              MIPI_CAL_CONFIG_HSPDOS(0) |
+                              MIPI_CAL_CONFIG_HSPUOS(4) |
+                              MIPI_CAL_CONFIG_TERMOS(5);
+                       clk = MIPI_CAL_CONFIG_SELECT |
+                             MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
+                             MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+               }
 
-               tegra_mipi_writel(device->mipi, value, modules[i].reg);
+               tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
+
+               if (soc->has_clk_lane)
+                       tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
        }
 
-       tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+       value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+       value |= MIPI_CAL_CTRL_START;
+       tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
 
        err = tegra_mipi_wait(device->mipi);
 
@@ -219,16 +243,63 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 }
 EXPORT_SYMBOL(tegra_mipi_calibrate);
 
+static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
+       { .data = MIPI_CAL_CONFIG_CSIA },
+       { .data = MIPI_CAL_CONFIG_CSIB },
+       { .data = MIPI_CAL_CONFIG_CSIC },
+       { .data = MIPI_CAL_CONFIG_CSID },
+       { .data = MIPI_CAL_CONFIG_CSIE },
+       { .data = MIPI_CAL_CONFIG_DSIA },
+       { .data = MIPI_CAL_CONFIG_DSIB },
+       { .data = MIPI_CAL_CONFIG_DSIC },
+       { .data = MIPI_CAL_CONFIG_DSID },
+};
+
+static const struct tegra_mipi_soc tegra114_mipi_soc = {
+       .has_clk_lane = false,
+       .pads = tegra114_mipi_pads,
+       .num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+};
+
+static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
+       { .data = MIPI_CAL_CONFIG_CSIA, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+       { .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+       { .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+       { .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+       { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+       { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+       { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+};
+
+static const struct tegra_mipi_soc tegra124_mipi_soc = {
+       .has_clk_lane = true,
+       .pads = tegra124_mipi_pads,
+       .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+};
+
+static struct of_device_id tegra_mipi_of_match[] = {
+       { .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
+       { .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+       { },
+};
+
 static int tegra_mipi_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *match;
        struct tegra_mipi *mipi;
        struct resource *res;
        int err;
 
+       match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
+       if (!match)
+               return -ENODEV;
+
        mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
        if (!mipi)
                return -ENOMEM;
 
+       mipi->soc = match->data;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mipi->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mipi->regs))
@@ -260,11 +331,6 @@ static int tegra_mipi_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id tegra_mipi_of_match[] = {
-       { .compatible = "nvidia,tegra114-mipi", },
-       { },
-};
-
 struct platform_driver tegra_mipi_driver = {
        .driver = {
                .name = "tegra-mipi",
index 73bd9e2e42bc3c7dfbd249c0a1d3d4e1c78a19f9..3402033fa52a7225c0a91846eba5237af8d08142 100644 (file)
@@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev)
                hdev->hiddev_disconnect(hdev);
        if (hdev->claimed & HID_CLAIMED_HIDRAW)
                hidraw_disconnect(hdev);
+       hdev->claimed = 0;
 }
 EXPORT_SYMBOL_GPL(hid_disconnect);
 
index e23ab8b30626dae386ebc84e507c55a8e5dcea6c..7c863738e419969a9dd0115ca7321d884034ae59 100644 (file)
 #define USB_VENDOR_ID_ELAN             0x04f3
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B    0x009b
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103    0x0103
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F    0x016f
 
 #define USB_VENDOR_ID_ELECOM           0x056e
index 5014bb567b29cd8f2799873ffb869ac55a28c6ae..552671ee7c5d7c6344fb92bca48f4f2d601b640e 100644 (file)
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
index fcdbde4ec692f25c11f0a44b281a3770ede2d0c0..3057dfc7e3bc6cde853660579b01b4cab3530344 100644 (file)
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
-       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
index 6aac695b1688beaf2adc5336bd842bb7993d2d09..9b55e673b67caf1365c7452ce51a22a37510af02 100644 (file)
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
        if (ret)
                goto clock_dis;
 
-       data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
-                                                                client->name,
-                                                                data,
-                                                                g762_groups);
+       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+                                                           data, g762_groups);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
                goto clock_dis;
index d2bf2c97ae7094c03ff836f6debf2de8ca66677c..6a30eeea94beff8ee3067de4106a8db1bec14a8a 100644 (file)
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
 
        opal = of_find_node_by_path("/ibm,opal/sensors");
        if (!opal) {
-               dev_err(&pdev->dev, "Opal node 'sensors' not found\n");
+               dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
                return -ENODEV;
        }
 
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
 
        err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
        if (err) {
-               pr_err("Platfrom driver probe failed\n");
+               if (err != -ENODEV)
+                       pr_err("Platform driver probe failed (%d)\n", err);
+
                goto exit_device_del;
        }
 
index 823c877a1ec0952ce5fab7a465c79826f3442764..1991d9032c3843de2ffcd20b82f5790d22ce2684 100644 (file)
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
 static int pwm_fan_resume(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+       unsigned long duty;
+       int ret;
 
-       if (ctx->pwm_value)
-               return pwm_enable(ctx->pwm);
-       return 0;
+       if (ctx->pwm_value == 0)
+               return 0;
+
+       duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
+       ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+       if (ret)
+               return ret;
+       return pwm_enable(ctx->pwm);
 }
 #endif
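
The old resume path only re-enabled the PWM; if the PWM provider lost its duty-cycle state across suspend, the fan came back at an undefined speed. The fix recomputes the duty from the cached sysfs value before re-enabling. A worked instance of the arithmetic (MAX_PWM is 255 in this driver; the 40000 ns period is an assumed example):

    /* pwm_value = 128, period = 40000 ns:
     *   duty = DIV_ROUND_UP(128 * (40000 - 1), 255)
     *        = DIV_ROUND_UP(5119872, 255)
     *        = 20078 ns, roughly 50.2% of the period
     */
    unsigned long duty = DIV_ROUND_UP(128UL * (40000UL - 1), 255UL);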
 
index 65ef9664d5da884cdec666a81228408e5240b381..899bede81b31b37e7a5ee4ec45d37afadb13971d 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-    MA 02110-1301 USA.
  * ------------------------------------------------------------------------- */
 
 /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
index 8b10f88b13d9bd8c908997d3ebdde7de84718732..580dbf05c1487c46e7ef504c18cfc8f128ced4cc 100644 (file)
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- *  MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
index 34370090b753f616ff56e8a67e8172210b05c2cd..270d84bfc2c68b526d6b8119afa67f4cb3c37543 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- *  MA 02110-1301 USA.
- *
  * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
  * Frodo Looijaard <frodol@dds.nl>, and also from Martin Bailey
  * <mbailey@littlefeet-inc.com>
index 1ec703ee788d2cc2c3467917d989f6c1c3c16973..262ee801975b314ce661f257595b74026637af6d 100644 (file)
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-    MA 02110-1301 USA.                                                 */
+    GNU General Public License for more details.                       */
 /* --------------------------------------------------------------------        */
 
 /* With some changes from Frodo Looijaard <frodol@dds.nl> */
index 451e305f797133291ffbd48bceefaa8eb194d1ed..4f2d78868281ef7eda0e11bc03518f10089a2aff 100644 (file)
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 2fa21ce9682b9b64e0511794ec1e2d3f525ca0b5..45c5c488302282876032e4fc7da289af00d1e1e1 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 41fc6837fb8b5393f2193873c765d07d87a2e955..65e324054970b51aded8091d8f8b5a212fd49bc4 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 /*
index a16f72891358124c6069ffbfd9eee537dfb2abf0..6c7113d990f882a758435eb78fc172510a6faed6 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 917d54588d95c14f966abd326e12467bea244342..e05a672db3e50aa3800cbb1d1db3af4c74ba9577 100644 (file)
@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                }
        }
 
-       ret = wait_for_completion_io_timeout(&dev->cmd_complete,
+       ret = wait_for_completion_timeout(&dev->cmd_complete,
                                             dev->adapter.timeout);
        if (ret == 0) {
                dev_err(dev->dev, "controller timed out\n");
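
The at91 change swaps wait_for_completion_io_timeout() for wait_for_completion_timeout(): the _io_ variant accounts the sleeping task as being in iowait, which is meant for block I/O and is misleading for an I2C transfer completed from interrupt context. Both share the same calling convention; a sketch of the usual pattern (kernel context, reusing the names from the hunk):

        unsigned long time_left;

        reinit_completion(&dev->cmd_complete);
        /* ... start the transfer; the IRQ handler calls complete() ... */
        time_left = wait_for_completion_timeout(&dev->cmd_complete,
                                                dev->adapter.timeout);
        if (time_left == 0)
                return -ETIMEDOUT;      /* zero means the timeout elapsed */
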
index 8762458ca7da1f09d5094f5f0490bd2bb22c3c2b..6f8c0756e350c707df7202c9f0b6062efda42759 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 
 #include <linux/delay.h>
index 63f3f03ecc9b069f899eccbe9524168bbef3656c..c604f4c3ac0dd53d036e28d839f81a87dbfea913 100644 (file)
 #define CDNS_I2C_DIVA_MAX      4
 #define CDNS_I2C_DIVB_MAX      64
 
+#define CDNS_I2C_TIMEOUT_MAX   0xFF
+
 #define cdns_i2c_readreg(offset)       readl_relaxed(id->membase + offset)
 #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
 
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
                goto err_clk_dis;
        }
 
+       /*
+        * The Cadence I2C controller has a bug wherein it generates an
+        * invalid read transaction after a HW timeout in master receiver
+        * mode. The HW timeout is not used by this driver and its interrupt
+        * is disabled, but the feature itself cannot be disabled. Hence the
+        * maximum value is written to this register to reduce the chances
+        * of error.
+        */
+       cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
        dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
                 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
index f3b89a4698b6192a24d031bab358796e51fbb767..5bdbc71698d0ec040bdecf6ad7d7ee9581e60706 100644 (file)
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/kernel.h>
index 4d96147191289680837e6710b8d7edfb5ecda73e..01f0cd87a4a5b6d40fb0c01b17d5d589cc2d4ac4 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ----------------------------------------------------------------------------
  *
  */
@@ -411,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
        if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
                        return msg->len;
-               if (stop) {
-                       w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-                       w |= DAVINCI_I2C_MDR_STP;
-                       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-               }
+               w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+               w |= DAVINCI_I2C_MDR_STP;
+               davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
                return -EREMOTEIO;
        }
        return -EIO;
index 3c20e4bd6dd1380238df20f06941828ec84aadd0..23628b7bfb8d8df208c6e434efb95e887dfad6e6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ----------------------------------------------------------------------------
  *
  */
@@ -363,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        }
 
        /* Configure Tx/Rx FIFO threshold levels */
-       dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+       dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
        dw_writel(dev, 0, DW_IC_RX_TL);
 
        /* configure the i2c master */
index d66b6cbc9edcbb742079612d336cc56e093b99c4..5a410ef17abd40c0ab7c7ece3ab3fa539adf1f95 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ----------------------------------------------------------------------------
  *
  */
index d31d313ab4f7694564012d3ad6d570a0c9f5f3a7..acb40f95db78f512c561f6d4d2b5b67479844811 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ----------------------------------------------------------------------------
  *
  */
index a7431150acf7c3da87e248e4e2f25fa3ffa16589..373dd4d477653f11d943ea49ad87c92a598636c5 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ----------------------------------------------------------------------------
  *
  */
index a44ea13d143492ffd433a33148870e37f0a1450c..76e699f9ed9732cb910fd6092837d1ddf13c37c5 100644 (file)
@@ -9,10 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  */
 
 #include <linux/module.h>
index 485497066ed7abca4df32b602b17f1e72b455a73..92e8c0ce16258111add80ecb60ce2ee37949309a 100644 (file)
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.               */
+    GNU General Public License for more details.                            */
 /* ------------------------------------------------------------------------- */
 
 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
index 14d2b76de25ff1da9a520236f110ce678a57bd43..b7864cf42a72c3d9a7d40008dd3fc1f0fa4c4cc8 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 #include <linux/kernel.h>
index 7cfc183b3d638adfcac780f750481fec622e738e..6ab4f1cb21f3fae717dfdfbf3b07e721d6e7254e 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index c48e46af670abec3314e63defc1662076c8008aa..e9fb7cf786120010d0c91a139612b4c59d90cadf 100644 (file)
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *     You should have received a copy of the GNU General Public License
- *     along with this program; if not, write to the Free Software
- *     Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307,
- *     USA.
- *
  * Author:
  *     Darius Augulis, Teltonika Inc.
  *
index 097e270955d0d2abce616dc81c07a1a7cf88668b..2d6929c2bd9270108195876ce42d8fe03a3fe304 100644 (file)
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.                */
+    GNU General Public License for more details.                            */
 /* ------------------------------------------------------------------------- */
 
 
index cf99dbf21fd100a5002aac1a6d98ddb20ab847c9..113293d275f670aa6fe8a33cc24889fb9bceaceb 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 3f6ecbfb9a56bf641e27d42b8e108116a9f99c5b..f2b0ff011631c9e920e042a1ec79b0b98126eb5b 100644 (file)
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.GPL.
  *
index b170bdffb5de3aa273d27b97b567b13b27cff278..88eda09e73c0b31509427a784c5219655a49c2fb 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 /*
index ee3a76c7ae9757a544da330c3e8ce776e6291d26..70b3c91585097cd5623953f1a2ed97e45fdad01b 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 0dffb0e62c3b876f5898594868bc7ad59fcd8d74..277a2288d4a86285c3335ba19fdabf01cd85aa74 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -926,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                if (stat & OMAP_I2C_STAT_NACK) {
                        err |= OMAP_I2C_STAT_NACK;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-                       break;
                }
 
                if (stat & OMAP_I2C_STAT_AL) {
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-                       break;
                }
 
                /*
@@ -958,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                        if (dev->fifo_size)
                                num_bytes = dev->buf_len;
 
-                       omap_i2c_receive_data(dev, num_bytes, true);
-
-                       if (dev->errata & I2C_OMAP_ERRATA_I207)
+                       if (dev->errata & I2C_OMAP_ERRATA_I207) {
                                i2c_omap_errata_i207(dev, stat);
+                               num_bytes = (omap_i2c_read_reg(dev,
+                                       OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+                       }
 
+                       omap_i2c_receive_data(dev, num_bytes, true);
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
                        continue;
                }
index 62f55fe624cb3be72228decd9f03631f85d2e483..d1f625f923c75451033891bcdd18c0aab6843f8f 100644 (file)
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ------------------------------------------------------------------------ */
 
 #include <linux/kernel.h>
index a27aae2d6757195039a69cd192dda8774081b194..a1fac5aa9bae9b47e10fc6b723c66ac2ff2067b9 100644 (file)
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ------------------------------------------------------------------------ */
 
 #include <linux/kernel.h>
index e572f3aac0f79863a98d7c9c134ab6b038c36eec..4e129453680515888b8ca31f22dba490efc94d5d 100644 (file)
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  * ------------------------------------------------------------------------ */
 
 #define PORT_DATA      0
index 7a9dce43e1156afad3c8e906fa4b92df59586ac6..df1dbc92a0244c4eb989e88826c9f24235f35c7a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
 #include <linux/module.h>
index 323f061a316378dfb827ee71af2eb5b2f578ed31..e0eb4ca0102e613130702757b7add12dd99bd05a 100644 (file)
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/kernel.h>
index a6f54ba27e2a5ff62bb24fca2982f13a2aed0aa3..67cbec6796a0eee50d0047e16a9db757f27836a9 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 8564768fee32eb24d7bfd92aec29d362ffe6cbdc..177834e2d84101283ad0afd288e75703e5d4aa12 100644 (file)
  *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/kernel.h>
index 01e967763c2a70ed92e15eb4e134ab84f621d42c..60a53c169ed2b3c8d3803411a310b752d8e1631b 100644 (file)
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
 */
 
 #include <linux/module.h>
index e3b0337faeb7f0b9ac4fe44546974605e047b4a0..65244774bfa343d464d0ad28d7503e886767f21c 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #include <linux/kernel.h>
index 8b5e79cb4468a87f01d073db574313529d0c7af1..4855188747c94e3cf3536579c544d01bf6c09b44 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 0fe505d7abe9b65e93882862e98ba586b2e73407..2b6219d86b0f31372ecceea15a6a8e29966f8eae 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 
 #include <linux/kernel.h>
index 964e5c6f84abcecf0e3d82d795613c216489c54d..15ac8395dcd3ccac120f49cef28d508d62f552ba 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #include <linux/kernel.h>
index ac9bc33acef493cf07a1ed9546fcb7b236604dfd..7d58a40faf2dc8713d6cad4662c7adadf6a0a8a6 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /* Note: we assume there can only be one SIS5595 with one SMBus interface */
index c6366733008d15cd145a15aa62a90e4832304d1c..1e6805b5cef23e810d5437a943f01793acb56466 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 8dc2fc5f74ffc523d335dbeec95432034b63b867..44b904426073f6228bb9b014705d7aa0f4136088 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index 10855a0b7e7fe181c6fbc2ecb7baddbc1ef99f80..4c7fc2d47014f214c2a316189d75fb8869c0ae1d 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/delay.h>
index f4a1ed757612851033dac49292138e4aaa266142..59b1d233ca7b2443992dca9a5b139f2db1dfb7c5 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 #include <linux/kernel.h>
index 6841200b6e5042d7313b1f09f68e13d5d2c0ca09..0ee2646f3b006bb7710cf9474ce2ccae366e4ef3 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 /*
index ade9223912d3069c9236fa4a0b6f03327f2b4593..cc65ea0b818fe5cbf6c1c1c17c79304ba085e0aa 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  *
  * This code was implemented by Mocean Laboratories AB when porting linux
  * to the automotive development board Russellville. The copyright holder
index ff3f5747e43b99123b5a3db733f4ac4c1925b394..5153354b1a6b8d151fd2265981175facfecb2baf 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index f24cc64e2e8c6a1288f0cdc0f38c545723057477..90e32295930332ec9fa92fd9f9ab153ee9b6e840 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
index 2f90ac6a7f794ad8e79577a2c5e59ed853bf413f..f43b4e11647a28338235b1fcba1070ab2a34f91b 100644 (file)
     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-    MA 02110-1301 USA.                                                      */
+    GNU General Public License for more details.                            */
 /* ------------------------------------------------------------------------- */
 
 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
@@ -670,6 +665,9 @@ static int i2c_device_remove(struct device *dev)
                status = driver->remove(client);
        }
 
+       if (dev->of_node)
+               irq_dispose_mapping(client->irq);
+
        dev_pm_domain_detach(&client->dev, true);
        return status;
 }
index 18a8fd21d2c226741445fdc5bf12ed306e10ff7e..17700bfddcf58fc00f036de804dddeb22ed458f0 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
  */
 
 #include <linux/rwsem.h>
index 80b47e8ce030cef7ff6f9ab9f058b15e63f7329e..71c7a3975b6287927c4bb666f433cf2787e20a54 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-    MA 02110-1301 USA.
 */
 
 /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module.
index fc99f0d6b4a5b0d1f11d886e5bdb5abf23073b0f..9ebf9cb4ad7a90579e879a5b586bdf3e0c75c4cb 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
index d241aa295d96907d942c60290cfd89f54880330e..af2a94e1140ba064b038e4beb4e9c9a1dee1e0cd 100644 (file)
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 #define DEBUG 1
index 22c096ce39ad765c6a50d26ff80e77158fe3bbf6..513bd6d14293d80e5ce502080a092b5f970fe840 100644 (file)
@@ -44,6 +44,9 @@
 
 #define BMC150_ACCEL_REG_INT_STATUS_2          0x0B
 #define BMC150_ACCEL_ANY_MOTION_MASK           0x07
+#define BMC150_ACCEL_ANY_MOTION_BIT_X          BIT(0)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Y          BIT(1)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Z          BIT(2)
 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN       BIT(3)
 
 #define BMC150_ACCEL_REG_PMU_LPW               0x11
@@ -92,9 +95,9 @@
 #define BMC150_ACCEL_SLOPE_THRES_MASK          0xFF
 
 /* Slope duration in terms of number of samples */
-#define BMC150_ACCEL_DEF_SLOPE_DURATION        2
+#define BMC150_ACCEL_DEF_SLOPE_DURATION                1
 /* in terms of multiples of g's/LSB, based on range */
-#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       5
+#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       1
 
 #define BMC150_ACCEL_REG_XOUT_L                0x02
 
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmc150_accel_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmc150_accel_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
 
 static const struct iio_event_spec bmc150_accel_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE) |
                                 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmc150_accel_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_RISING;
 
-       if (ret & BMC150_ACCEL_ANY_MOTION_MASK)
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_Y,
                                                        IIO_EV_TYPE_ROC,
-                                                       IIO_EV_DIR_EITHER),
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_Z,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
                                                        data->timestamp);
 ack_intr_status:
        if (!data->dready_trigger_on)
@@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmc150_accel_data *data = iio_priv(indio_dev);
+       int ret;
 
        dev_dbg(&data->client->dev,  __func__);
+       ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       if (ret < 0)
+               return -EAGAIN;
 
-       return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       return 0;
 }
 
 static int bmc150_accel_runtime_resume(struct device *dev)
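
The bmc150 event handler above now pushes one event per axis (IIO_MOD_X/Y/Z) instead of a single lumped IIO_MOD_X_OR_Y_OR_Z event, so userspace can tell which axis triggered motion detection from the modifier field of the event code. A hedged userspace sketch of consuming such events (the device path is an assumption for illustration; error handling trimmed):

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/iio/events.h>
        #include <linux/iio/types.h>

        int main(void)
        {
                struct iio_event_data ev;
                int fd = open("/dev/iio:device0", O_RDONLY); /* illustrative path */
                int event_fd;

                if (fd < 0 || ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
                        return 1;

                while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev)) {
                        int mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(ev.id);

                        printf("motion on %s axis\n",
                               mod == IIO_MOD_X ? "X" :
                               mod == IIO_MOD_Y ? "Y" :
                               mod == IIO_MOD_Z ? "Z" : "?");
                }
                return 0;
        }
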
index 98909a9e284e408756d3b21ed848d0d32b077bfc..320aa72c0349ecabeae7ea4a0bdb59d2c84cd63d 100644 (file)
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
                return ret;
        }
 
+       ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
+                KXCJK1013_REG_CTRL1_BIT_GSEL1);
        ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
        ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
 
@@ -894,7 +896,7 @@ static const struct attribute_group kxcjk1013_attrs_group = {
 
 static const struct iio_event_spec kxcjk1013_event = {
                .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE) |
                                 BIT(IIO_EV_INFO_PERIOD)
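
The kxcjk1013 range fix above is a classic read-modify-write bug: OR-ing the new GSEL bits into CTRL1 without first clearing the old ones leaves stale bits set when switching between ranges. The pattern in miniature (the mask value mirrors the driver's bits 3 and 4):

        #include <stdio.h>

        #define GSEL_MASK 0x18  /* GSEL0 | GSEL1, bits 3 and 4 */

        int main(void)
        {
                unsigned char reg = 0x18;      /* old range had both bits set */
                unsigned char new_bits = 0x08; /* new range wants only GSEL0 */

                unsigned char buggy = reg | new_bits;                /* stale bit 4 survives */
                unsigned char fixed = (reg & ~GSEL_MASK) | new_bits; /* clear, then set */

                printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);
                return 0;
        }
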
index b58d6302521f4d651359715331e83a5a416583a0..d095efe1ba149caa57136ec1f27f1c6caac10cd8 100644 (file)
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
 
 static const struct mcb_device_id men_z188_ids[] = {
        { .device = 0xbc },
+       { }
 };
 MODULE_DEVICE_TABLE(mcb, men_z188_ids);
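
The men_z188 hunk adds the terminating entry to the device ID table; table matching conventionally walks entries until it reaches an all-zero sentinel, so omitting it lets the scan run past the end of the array. A minimal sketch of sentinel-terminated iteration:

        #include <stdio.h>

        struct dev_id { unsigned int device; };

        static const struct dev_id ids[] = {
                { .device = 0xbc },
                { }             /* all-zero sentinel ends the scan */
        };

        int main(void)
        {
                const struct dev_id *id;

                for (id = ids; id->device; id++)
                        printf("supports device 0x%x\n", id->device);
                return 0;
        }
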
 
index 1f967e0d688e47a084f29e2b484621016ada7c3c..d2fa526740ca188e00926f42b733af91d4dcd9df 100644 (file)
@@ -67,6 +67,9 @@
 #define BMG160_REG_INT_EN_0            0x15
 #define BMG160_DATA_ENABLE_INT         BIT(7)
 
+#define BMG160_REG_INT_EN_1            0x16
+#define BMG160_INT1_BIT_OD             BIT(1)
+
 #define BMG160_REG_XOUT_L              0x02
 #define BMG160_AXIS_TO_REG(axis)       (BMG160_REG_XOUT_L + (axis * 2))
 
@@ -82,6 +85,9 @@
 
 #define BMG160_REG_INT_STATUS_2        0x0B
 #define BMG160_ANY_MOTION_MASK         0x07
+#define BMG160_ANY_MOTION_BIT_X                BIT(0)
+#define BMG160_ANY_MOTION_BIT_Y                BIT(1)
+#define BMG160_ANY_MOTION_BIT_Z                BIT(2)
 
 #define BMG160_REG_TEMP                0x08
 #define BMG160_TEMP_CENTER_VAL         23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
        data->slope_thres = ret;
 
        /* Set default interrupt mode */
+       ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
+               return ret;
+       }
+       ret &= ~BMG160_INT1_BIT_OD;
+       ret = i2c_smbus_write_byte_data(data->client,
+                                       BMG160_REG_INT_EN_1, ret);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
+               return ret;
+       }
+
        ret = i2c_smbus_write_byte_data(data->client,
                                        BMG160_REG_INT_RST_LATCH,
                                        BMG160_INT_MODE_LATCH_INT |
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmg160_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 #endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmg160_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
 
 static const struct iio_event_spec bmg160_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
                                       BIT(IIO_EV_INFO_ENABLE)
 };
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmg160_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_FALLING;
 
-       if (ret & BMG160_ANY_MOTION_MASK)
+       if (ret & BMG160_ANY_MOTION_BIT_X)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Y)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Y,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Z,
                                                        IIO_EV_TYPE_ROC,
                                                        dir),
                                                        data->timestamp);
@@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmg160_data *data = iio_priv(indio_dev);
+       int ret;
+
+       ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "set mode failed\n");
+               return -EAGAIN;
+       }
 
-       return bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       return 0;
 }
 
 static int bmg160_runtime_resume(struct device *dev)
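
Both bmg160 and bmc150 above gained the same error-path fix in their power-state helpers: pm_runtime_get_sync() increments the device's usage count even when it fails, so the failure path must drop that reference or the device can never runtime-suspend again. The idiom, sketched for kernel context (the example_* names are placeholders):

        #include <linux/pm_runtime.h>

        static int example_set_power_state(struct device *dev, bool on)
        {
                int ret;

                if (on)
                        ret = pm_runtime_get_sync(dev);
                else
                        ret = pm_runtime_put_autosuspend(dev);

                if (ret < 0) {
                        if (on) /* get_sync bumped the count even on failure */
                                pm_runtime_put_noidle(dev);
                        return ret;
                }
                return 0;
        }
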
index a15006efa1372e9c466c090a7b2bf409749da3e7..0763b86325738701175fef40030d33a2485d47f8 100644 (file)
@@ -230,9 +230,12 @@ static int tsl4531_resume(struct device *dev)
        return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL,
                TSL4531_MODE_NORMAL);
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume);
+#define TSL4531_PM_OPS (&tsl4531_pm_ops)
+#else
+#define TSL4531_PM_OPS NULL
+#endif
 
 static const struct i2c_device_id tsl4531_id[] = {
        { "tsl4531", 0 },
@@ -243,7 +246,7 @@ MODULE_DEVICE_TABLE(i2c, tsl4531_id);
 static struct i2c_driver tsl4531_driver = {
        .driver = {
                .name   = TSL4531_DRV_NAME,
-               .pm     = &tsl4531_pm_ops,
+               .pm     = TSL4531_PM_OPS,
                .owner  = THIS_MODULE,
        },
        .probe  = tsl4531_probe,
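
The tsl4531 hunk moves the dev_pm_ops definition inside the CONFIG_PM_SLEEP guard and substitutes NULL when power management is compiled out, a common pattern for drivers whose suspend/resume callbacks only exist under that option. Reduced to a skeleton (the foo_* names are placeholders):

        #ifdef CONFIG_PM_SLEEP
        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
        #define FOO_PM_OPS (&foo_pm_ops)
        #else
        #define FOO_PM_OPS NULL /* no PM callbacks in this configuration */
        #endif

        static struct i2c_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .pm     = FOO_PM_OPS,
                },
        };
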
index 5e780ef206f3c48774cb3f2d3a587ba0c6be1571..8349cc0fdf66b2c984f2592101a7fcd1613c4ce4 100644 (file)
@@ -330,7 +330,7 @@ static int as3935_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(st));
+       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
        if (!indio_dev)
                return -ENOMEM;
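
The as3935 one-liner fixes a subtle allocation bug: sizeof(st) is the size of the pointer, not of the state structure it points to, so the private area handed to devm_iio_device_alloc() was far too small. The difference, demonstrated in userspace C (the struct layout is a stand-in):

        #include <stdio.h>

        struct as3935_state { char regs[64]; }; /* stand-in for the real struct */

        int main(void)
        {
                struct as3935_state *st;

                /* Typically prints 8 vs 64 on a 64-bit machine */
                printf("sizeof(st)=%zu sizeof(*st)=%zu\n", sizeof(st), sizeof(*st));
                return 0;
        }
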
 
index 3effa931fce259cdf661af95f42ad1463450acf4..10641b7816f49e493dd2e1f091d0921d910c2475 100644 (file)
@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-        * work-around for RDMA_READ..
+        * work-around for RDMA_READs with ConnectX-2.
+        *
+        * Also, still make sure to have at least two SGEs for
+        * outgoing control PDU responses.
         */
-       attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
+       attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
        isert_conn->max_sge = attr.cap.max_send_sge;
 
        attr.cap.max_recv_sge = 1;
@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device)
        struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
        int ret = 0, i, j;
+       int max_rx_cqe, max_tx_cqe;
 
        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;
 
+       max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
+       max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+
        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
            dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_RX_CQ_LEN, i);
+                                               max_rx_cqe, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_TX_CQ_LEN, i);
+                                               max_tx_cqe, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
@@ -803,14 +810,25 @@ wake_up:
        complete(&isert_conn->conn_wait);
 }
 
-static void
+static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
 {
-       struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+       struct isert_conn *isert_conn;
+
+       if (!cma_id->qp) {
+               struct isert_np *isert_np = cma_id->context;
+
+               isert_np->np_cm_id = NULL;
+               return -1;
+       }
+
+       isert_conn = (struct isert_conn *)cma_id->context;
 
        isert_conn->disconnect = disconnect;
        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
+
+       return 0;
 }
 
 static int
@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                ret = isert_connect_request(cma_id, event);
+               if (ret)
+                       pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+                               event->event, ret);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                isert_connected_handler(cma_id);
@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
                disconnect = true;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-               isert_disconnected_handler(cma_id, disconnect);
+               ret = isert_disconnected_handler(cma_id, disconnect);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                break;
        }
 
-       if (ret != 0) {
-               pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-                      event->event, ret);
-               dump_stack();
-       }
-
        return ret;
 }
 
@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np)
 {
        struct isert_np *isert_np = (struct isert_np *)np->np_context;
 
-       rdma_destroy_id(isert_np->np_cm_id);
+       if (isert_np->np_cm_id)
+               rdma_destroy_id(isert_np->np_cm_id);
 
        np->np_context = NULL;
        kfree(isert_np);
index 7206547c13ce0dc8dda59458658d843d64520327..dc829682701ad1dbae8375eb2ff9a2c97feea48f 100644 (file)
@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
        if (!qp_init)
                goto out;
 
+retry:
        ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
                              ch->rq_size + srp_sq_size, 0);
        if (IS_ERR(ch->cq)) {
@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
        ch->qp = ib_create_qp(sdev->pd, qp_init);
        if (IS_ERR(ch->qp)) {
                ret = PTR_ERR(ch->qp);
+               if (ret == -ENOMEM) {
+                       srp_sq_size /= 2;
+                       if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+                               ib_destroy_cq(ch->cq);
+                               goto retry;
+                       }
+               }
                printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
                goto err_destroy_cq;
        }
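
The srpt retry above handles HCAs that cannot allocate a send queue of the requested depth: on -ENOMEM the CQ is destroyed, srp_sq_size is halved and the creation retried until a floor is reached. The back-off skeleton as runnable C (the allocator and floor value are illustrative):

        #include <stdio.h>
        #include <stdlib.h>

        #define MIN_SQ_SIZE 16  /* cf. MIN_SRPT_SQ_SIZE in the driver */

        static void *try_create(size_t n)
        {
                return n > 1024 ? NULL : malloc(n); /* pretend big requests fail */
        }

        int main(void)
        {
                size_t sq_size = 4096;
                void *qp;

                while (!(qp = try_create(sq_size))) {
                        sq_size /= 2;           /* halve and retry */
                        if (sq_size < MIN_SQ_SIZE)
                                return 1;       /* give up below the floor */
                }
                printf("created with sq_size=%zu\n", sq_size);
                free(qp);
                return 0;
        }
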
index bc203485716d870205a22d38add99ad6c1a9ea1e..8afa28e4570ed099bb3fb9fc4b2d7e1c1a5ba9d6 100644 (file)
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
 
  err_free_client:
        evdev_detach_client(evdev, client);
-       kfree(client);
+       kvfree(client);
        return error;
 }
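
evdev's client buffer comes from kzalloc() or, when that fails for large buffers, from vzalloc(); the error path must therefore free with kvfree(), which dispatches to the right deallocator, whereas the old kfree() was wrong whenever the vmalloc fallback had been taken. The pairing, sketched for kernel context (structure layout elided):

        #include <linux/mm.h>           /* kvfree() */
        #include <linux/slab.h>
        #include <linux/vmalloc.h>

        client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!client)
                client = vzalloc(size); /* large-buffer fallback */

        /* ... on any subsequent error path: */
        kvfree(client);                 /* correct for either allocator */
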
 
index 2ed7905a068fc9033e8998e547bd7d750b1fedb9..fc55f0d15b70118a3a5be5fc221f151475f014e3 100644 (file)
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                }
 
                ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
-               usb_fill_bulk_urb(xpad->bulk_out, udev,
-                               usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
-                               xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
+               if (usb_endpoint_is_bulk_out(ep_irq_in)) {
+                       usb_fill_bulk_urb(xpad->bulk_out, udev,
+                                         usb_sndbulkpipe(udev,
+                                                         ep_irq_in->bEndpointAddress),
+                                         xpad->bdata, XPAD_PKT_LEN,
+                                         xpad_bulk_out, xpad);
+               } else {
+                       usb_fill_int_urb(xpad->bulk_out, udev,
+                                        usb_sndintpipe(udev,
+                                                       ep_irq_in->bEndpointAddress),
+                                        xpad->bdata, XPAD_PKT_LEN,
+                                        xpad_bulk_out, xpad, 0);
+               }
 
                /*
                 * Submit the int URB immediately rather than waiting for open
index fb3b63b2f85c3615619452ef9ee2e6f0528d8245..8400a1a34d87569a5d81febffb75e97d91365ee5 100644 (file)
@@ -85,6 +85,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
        }
 
        platform_set_drvdata(pdev, pwr);
+       device_init_wakeup(&pdev->dev, true);
 
        return 0;
 }
index 2b0ae8cc8e51bc3ca52622f66e5909b537bd0153..d125a019383f10155dcafb88903f47e0f5297080 100644 (file)
@@ -1156,7 +1156,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
 
-       if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
+       /*
+        * Check if we are dealing with a bare PS/2 packet, presumably from
+        * a device connected to the external PS/2 port. Because the bare
+        * PS/2 protocol does not have enough constant bits to
+        * self-synchronize properly, we only do this if the device is
+        * fully synchronized.
+        */
+       if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
                if (psmouse->pktcnt == 3) {
                        alps_report_bare_ps2_packet(psmouse, psmouse->packet,
                                                    true);
@@ -1180,12 +1186,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
        }
 
        /* Bytes 2 - pktsize should have 0 in the highest bit */
-       if ((priv->proto_version < ALPS_PROTO_V5) &&
+       if (priv->proto_version < ALPS_PROTO_V5 &&
            psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
            (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
                psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
                            psmouse->pktcnt - 1,
                            psmouse->packet[psmouse->pktcnt - 1]);
+
+               if (priv->proto_version == ALPS_PROTO_V3 &&
+                   psmouse->pktcnt == psmouse->pktsize) {
+                       /*
+                        * Some Dell boxes, such as Latitude E6440 or E7440
+                        * with the lid closed, quite often smash the last
+                        * byte of an otherwise valid packet with 0xff. Given
+                        * that the next packet is very likely to be valid,
+                        * let's report PSMOUSE_FULL_PACKET but not process
+                        * the data, rather than reporting PSMOUSE_BAD_DATA
+                        * and filling the logs.
+                        */
+                       return PSMOUSE_FULL_PACKET;
+               }
+
                return PSMOUSE_BAD_DATA;
        }
 
@@ -2389,6 +2410,9 @@ int alps_init(struct psmouse *psmouse)
        /* We are having trouble resyncing ALPS touchpads so disable it for now */
        psmouse->resync_time = 0;
 
+       /* Allow 2 invalid packets without resetting device */
+       psmouse->resetafter = psmouse->pktsize * 2;
+
        return 0;
 
 init_fail:
index 06fc6e76ffbe0cf725053b8a1bdb3faaefa1c629..f2b97802640755aacfcde04005b125717cb63818 100644 (file)
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
        int x, y;
        u32 t;
 
-       if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
-                         !tp_dev,
-                         psmouse_fmt("Unexpected trackpoint message\n"))) {
-               if (etd->debug == 1)
-                       elantech_packet_dump(psmouse);
-               return;
-       }
-
        t = get_unaligned_le32(&packet[0]);
 
        switch (t & ~7U) {
@@ -563,6 +555,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
        } else {
                input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
                input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+               input_report_key(dev, BTN_MIDDLE, packet[0] & 0x04);
        }
 
        input_mt_report_pointer_emulation(dev, true);
@@ -792,6 +785,9 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        unsigned char packet_type = packet[3] & 0x03;
        bool sanity_check;
 
+       if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
+               return PACKET_TRACKPOINT;
+
        /*
         * Sanity check based on the constant bits of a packet.
         * The constant bits change depending on the value of
@@ -877,10 +873,19 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
 
        case 4:
                packet_type = elantech_packet_check_v4(psmouse);
-               if (packet_type == PACKET_UNKNOWN)
+               switch (packet_type) {
+               case PACKET_UNKNOWN:
                        return PSMOUSE_BAD_DATA;
 
-               elantech_report_absolute_v4(psmouse, packet_type);
+               case PACKET_TRACKPOINT:
+                       elantech_report_trackpoint(psmouse, packet_type);
+                       break;
+
+               default:
+                       elantech_report_absolute_v4(psmouse, packet_type);
+                       break;
+               }
+
                break;
        }
 
@@ -1119,6 +1124,22 @@ static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
        }
 }
 
+/*
+ * Some hw_version 4 models do have a middle button
+ */
+static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               /* Fujitsu H730 has a middle button */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+               },
+       },
+#endif
+       { }
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1138,6 +1159,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        __clear_bit(EV_REL, dev->evbit);
 
        __set_bit(BTN_LEFT, dev->keybit);
+       if (dmi_check_system(elantech_dmi_has_middle_button))
+               __set_bit(BTN_MIDDLE, dev->keybit);
        __set_bit(BTN_RIGHT, dev->keybit);
 
        __set_bit(BTN_TOUCH, dev->keybit);
@@ -1299,6 +1322,7 @@ ELANTECH_INT_ATTR(reg_25, 0x25);
 ELANTECH_INT_ATTR(reg_26, 0x26);
 ELANTECH_INT_ATTR(debug, 0);
 ELANTECH_INT_ATTR(paritycheck, 0);
+ELANTECH_INT_ATTR(crc_enabled, 0);
 
 static struct attribute *elantech_attrs[] = {
        &psmouse_attr_reg_07.dattr.attr,
@@ -1313,6 +1337,7 @@ static struct attribute *elantech_attrs[] = {
        &psmouse_attr_reg_26.dattr.attr,
        &psmouse_attr_debug.dattr.attr,
        &psmouse_attr_paritycheck.dattr.attr,
+       &psmouse_attr_crc_enabled.dattr.attr,
        NULL
 };
 
@@ -1438,6 +1463,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
        return 0;
 }
 
+/*
+ * Some hw_version 4 models do not work with crc_disabled
+ */
+static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               /* Fujitsu H730 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+               },
+       },
+#endif
+       { }
+};
+
 /*
  * Some hw_version 3 models go into error state when we try to set
  * bit 3 and/or bit 1 of r10.
@@ -1513,7 +1554,8 @@ static int elantech_set_properties(struct elantech_data *etd)
         * The signatures of v3 and v4 packets change depending on the
         * value of this hardware flag.
         */
-       etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+       etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 ||
+                          dmi_check_system(elantech_dmi_force_crc_enabled);
 
        /* Enable real hardware resolution on hw_version 3 ? */
        etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
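
Both quirks above use the same mechanism: a zero-terminated dmi_system_id table matched once against the firmware's DMI strings. A sketch of the idiom with an invented entry (only the FUJITSU / CELSIUS H730 strings above come from the patch):

    #include <linux/dmi.h>

    static const struct dmi_system_id example_quirk_table[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "SomeVendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "SomeModel"),
                    },
            },
            { }     /* zeroed terminator is mandatory */
    };

    /* at probe time; dmi_check_system() returns the number of matches */
    bool has_quirk = dmi_check_system(example_quirk_table);
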
index 9031a0a28ea4f2cf0c086036334e975a3fdbdf35..f9472920d986368f7aa83eb7d0621489d774b050 100644 (file)
@@ -135,14 +135,18 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN2002",
-                                       "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN0039",
+                                       "LEN2002", "LEN2004", NULL},
                1024, 5112, 2024, 4832
        },
        {
                (const char * const []){"LEN2001", NULL},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               1264, 5675, 1171, 4688
+       },
        { }
 };
 
@@ -163,6 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0036", /* T440 */
        "LEN0037",
        "LEN0038",
+       "LEN0039", /* T440s */
        "LEN0041",
        "LEN0042", /* Yoga */
        "LEN0045",
index 90d734bbf467a2cec462282191f64c8f2131c1ae..1e6360e7ae44790bb28cb6c11be043637c4d0652 100644 (file)
@@ -92,13 +92,6 @@ static spinlock_t state_lock;
 
 static struct workqueue_struct *iommu_wq;
 
-/*
- * Empty page table - Used between
- * mmu_notifier_invalidate_range_start and
- * mmu_notifier_invalidate_range_end
- */
-static u64 *empty_page_table;
-
 static void free_pasid_states(struct device_state *dev_state);
 
 static u16 device_id(struct pci_dev *pdev)
@@ -279,10 +272,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
 
 static void put_pasid_state(struct pasid_state *pasid_state)
 {
-       if (atomic_dec_and_test(&pasid_state->count)) {
-               put_device_state(pasid_state->device_state);
+       if (atomic_dec_and_test(&pasid_state->count))
                wake_up(&pasid_state->wq);
-       }
 }
 
 static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +282,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
 
        prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
 
-       if (atomic_dec_and_test(&pasid_state->count))
-               put_device_state(pasid_state->device_state);
-       else
+       if (!atomic_dec_and_test(&pasid_state->count))
                schedule();
 
        finish_wait(&pasid_state->wq, &wait);
@@ -418,46 +407,21 @@ static void mn_invalidate_page(struct mmu_notifier *mn,
        __mn_flush_page(mn, address);
 }
 
-static void mn_invalidate_range_start(struct mmu_notifier *mn,
-                                     struct mm_struct *mm,
-                                     unsigned long start, unsigned long end)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       unsigned long flags;
-
-       pasid_state = mn_to_state(mn);
-       dev_state   = pasid_state->device_state;
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       if (pasid_state->mmu_notifier_count == 0) {
-               amd_iommu_domain_set_gcr3(dev_state->domain,
-                                         pasid_state->pasid,
-                                         __pa(empty_page_table));
-       }
-       pasid_state->mmu_notifier_count += 1;
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void mn_invalidate_range_end(struct mmu_notifier *mn,
-                                   struct mm_struct *mm,
-                                   unsigned long start, unsigned long end)
+static void mn_invalidate_range(struct mmu_notifier *mn,
+                               struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
 {
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
-       unsigned long flags;
 
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       pasid_state->mmu_notifier_count -= 1;
-       if (pasid_state->mmu_notifier_count == 0) {
-               amd_iommu_domain_set_gcr3(dev_state->domain,
-                                         pasid_state->pasid,
-                                         __pa(pasid_state->mm->pgd));
-       }
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
+       if ((start ^ (end - 1)) < PAGE_SIZE)
+               amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+                                    start);
+       else
+               amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -482,8 +446,7 @@ static struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
        .invalidate_page        = mn_invalidate_page,
-       .invalidate_range_start = mn_invalidate_range_start,
-       .invalidate_range_end   = mn_invalidate_range_end,
+       .invalidate_range       = mn_invalidate_range,
 };
 
 static void set_pri_tag_status(struct pasid_state *pasid_state,
@@ -954,18 +917,10 @@ static int __init amd_iommu_v2_init(void)
        if (iommu_wq == NULL)
                goto out;
 
-       ret = -ENOMEM;
-       empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
-       if (empty_page_table == NULL)
-               goto out_destroy_wq;
-
        amd_iommu_register_ppr_notifier(&ppr_nb);
 
        return 0;
 
-out_destroy_wq:
-       destroy_workqueue(iommu_wq);
-
 out:
        return ret;
 }
@@ -999,8 +954,6 @@ static void __exit amd_iommu_v2_exit(void)
        }
 
        destroy_workqueue(iommu_wq);
-
-       free_page((unsigned long)empty_page_table);
 }
 
 module_init(amd_iommu_v2_init);
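
With the .invalidate_range callback the driver flushes the IOTLB directly as entries change, so the temporary empty page table is no longer needed. The single-page test (start ^ (end - 1)) < PAGE_SIZE works because a range confined to one page agrees with itself in every bit above the page offset, leaving only offset bits after the XOR. A worked check:

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* true iff [start, end) does not cross a page boundary */
    static int one_page(unsigned long start, unsigned long end)
    {
            return (start ^ (end - 1)) < PAGE_SIZE;
    }

    /* one_page(0x1000, 0x2000) -> 1: 0x1000 ^ 0x1fff = 0x0fff
     * one_page(0x1800, 0x2800) -> 0: 0x1800 ^ 0x27ff = 0x3fff */
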
index 3e238cd049e602540984d770e7ba1c930841703a..6a2e168c3ab0fdae090000ba2d2a3659501499ab 100644 (file)
@@ -43,6 +43,7 @@
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
 #define ARMADA_370_XP_INT_SOURCE_CPU_MASK      0xF
+#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)  ((BIT(0) | BIT(8)) << cpuid)
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 #define ARMADA_375_PPI_CAUSE                   (0x10)
@@ -406,19 +407,29 @@ static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
                                                  struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_get_chip(irq);
-       unsigned long irqmap, irqn;
+       unsigned long irqmap, irqn, irqsrc, cpuid;
        unsigned int cascade_irq;
 
        chained_irq_enter(chip, desc);
 
        irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
-
-       if (irqmap & BIT(0)) {
-               armada_370_xp_handle_msi_irq(NULL, true);
-               irqmap &= ~BIT(0);
-       }
+       cpuid = cpu_logical_map(smp_processor_id());
 
        for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+               irqsrc = readl_relaxed(main_int_base +
+                                      ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+
+               /* Check if the interrupt is not masked on current CPU.
+                * Test IRQ (0-1) and FIQ (8-9) mask bits.
+                */
+               if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
+                       continue;
+
+               if (irqn == 1) {
+                       armada_370_xp_handle_msi_irq(NULL, true);
+                       continue;
+               }
+
                cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
                generic_handle_irq(cascade_irq);
        }
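
The cascade handler now skips sources that are masked for the CPU it runs on; ARMADA_370_XP_INT_IRQ_FIQ_MASK simply shifts the CPU0 IRQ and FIQ enable bits up by the logical CPU id:

    #define BIT(n)                  (1UL << (n))
    #define INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid))

    /* cpuid 0 -> 0x101 (IRQ bit 0, FIQ bit 8)
     * cpuid 1 -> 0x202 (IRQ bit 1, FIQ bit 9) */
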
index 6ae3cdee0681a8008218fbcf25762280b64e48ce..cc4f9d80122ea618e7543f4885843359194770a7 100644 (file)
@@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
        }
 
        ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
-                                            handle_level_irq, 0, 0,
-                                            IRQCHIP_SKIP_SET_WAKE);
+                                            handle_fasteoi_irq,
+                                            IRQ_NOREQUEST | IRQ_NOPROBE |
+                                            IRQ_NOAUTOEN, 0, 0);
        if (ret)
                goto err_domain_remove;
 
@@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
                gc->unused = 0;
                gc->wake_enabled = ~0;
                gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
-               gc->chip_types[0].handler = handle_fasteoi_irq;
                gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
                gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
index b9f4fb808e49a4afefa0bf66c707dbc01d7c3fa2..5fb38a2ac2261ca06c5bb338ae044a9ed61dc361 100644 (file)
@@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
        int parent_irq;
 
        parent_irq = irq_of_parse_and_map(dn, irq);
-       if (parent_irq < 0) {
+       if (!parent_irq) {
                pr_err("failed to map interrupt %d\n", irq);
-               return parent_irq;
+               return -EINVAL;
        }
 
        data->irq_map_mask |= be32_to_cpup(map_mask + irq);
index c15c840987d2808e82cf1b056c231005933c5f8b..14691a4cb84cdf82fb38eefc0081a07460efae7b 100644 (file)
@@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
        __raw_writel(0xffffffff, data->base + CPU_CLEAR);
 
        data->parent_irq = irq_of_parse_and_map(np, 0);
-       if (data->parent_irq < 0) {
+       if (!data->parent_irq) {
                pr_err("failed to find parent interrupt\n");
-               ret = data->parent_irq;
+               ret = -EINVAL;
                goto out_unmap;
        }
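
Both fixes correct the same API misuse: irq_of_parse_and_map() returns an unsigned int and signals failure with 0, never a negative errno, so a "< 0" test can never fire. The safe pattern, as used in both hunks:

    unsigned int irq = irq_of_parse_and_map(np, 0);
    if (!irq) {                     /* 0 means "no mapping" */
            pr_err("failed to map interrupt\n");
            return -EINVAL;         /* the errno is the caller's choice */
    }
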
 
index 825ca1f87639aae4bf1c0f60567bc163c75b338b..afe79719ea329e72e0d8e8433222f75fe4a9ac0a 100644 (file)
@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
 
 /*
  * Test if the buffer is unused and too old, and commit it.
- * At if noio is set, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
- * different bufio client.
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to different bufio client.
  */
 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
                                unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
        if (jiffies - b->last_accessed < max_jiffies)
                return 0;
 
-       if (!(gfp & __GFP_IO)) {
+       if (!(gfp & __GFP_FS)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        unsigned long freed;
 
        c = container_of(shrink, struct dm_bufio_client, shrinker);
-       if (sc->gfp_mask & __GFP_IO)
+       if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
        unsigned long count;
 
        c = container_of(shrink, struct dm_bufio_client, shrinker);
-       if (sc->gfp_mask & __GFP_IO)
+       if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return 0;
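
Switching the test from __GFP_IO to __GFP_FS widens the class of allocations the shrinker refuses to block for: as the updated comment says, a GFP_NOFS caller may already hold dm_bufio_clients_lock, and blocking here could deadlock if the resulting I/O gets rerouted to another bufio client. The resulting shrinker shape, sketched with hypothetical names:

    static unsigned long cache_shrink_scan(struct shrinker *shrink,
                                           struct shrink_control *sc)
    {
            struct cache *c = container_of(shrink, struct cache, shrinker);
            unsigned long freed;

            if (sc->gfp_mask & __GFP_FS)
                    mutex_lock(&c->lock);           /* safe to block */
            else if (!mutex_trylock(&c->lock))
                    return SHRINK_STOP;             /* never block NOFS reclaim */

            freed = evict_old_entries(c, sc->nr_to_scan);   /* hypothetical */
            mutex_unlock(&c->lock);
            return freed;
    }
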
index 4857fa4a5484ba8e4ae743f2e8d4a48205229875..07c0fa0fa284fbdc9e86673c219f99cd07a35f77 100644 (file)
@@ -789,8 +789,7 @@ struct dm_raid_superblock {
        __le32 layout;
        __le32 stripe_sectors;
 
-       __u8 pad[452];          /* Round struct to 512 bytes. */
-                               /* Always set to 0 when writing. */
+       /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);
 
-       memset(sb, 0, sizeof(*sb));
+       memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0);  /* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        uint64_t events_sb, events_refsb;
 
        rdev->sb_start = 0;
-       rdev->sb_size = sizeof(*sb);
+       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+               DMERR("superblock size of a logical block is no longer valid");
+               return -EINVAL;
+       }
 
        ret = read_disk_sb(rdev, rdev->sb_size);
        if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
        raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
 
        for (i = 0; i < rs->md.raid_disks; i++) {
-               struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev);
+               struct request_queue *q;
+
+               if (!rs->dev[i].rdev.bdev)
+                       continue;
 
+               q = bdev_get_queue(rs->dev[i].rdev.bdev);
                if (!q || !blk_queue_discard(q))
                        return;
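
super_sync() now preserves the fields it just wrote and clears only the tail of the (possibly 4K) logical block: with struct pointer arithmetic, sb + 1 is the first byte past the structure. A small demo of the idiom:

    #include <string.h>

    struct sb { unsigned int magic; unsigned char fields[124]; }; /* 128 bytes */

    static void zero_tail(struct sb *sb, size_t block_size)
    {
            /* sb + 1 == (char *)sb + sizeof(*sb) */
            memset(sb + 1, 0, block_size - sizeof(*sb));
    }
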
 
index d1600d2aa2e2e6983643ef0ef864195f858d4f9d..f8b37d4c05d8c301658a42c116110d1b63fc9323 100644 (file)
@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                sc->stripes_shift = __ffs(stripes);
 
        r = dm_set_target_max_io_len(ti, chunk_size);
-       if (r)
+       if (r) {
+               kfree(sc);
                return r;
+       }
 
        ti->num_flush_bios = stripes;
        ti->num_discard_bios = stripes;
index 4843801173fe11a99519b59dd808e46e02425ee4..0f86d802b533301bf8374eb07cd9224dd06c799c 100644 (file)
@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_SUBMITTED;
        }
 
+       /*
+        * We must hold the virtual cell before doing the lookup, otherwise
+        * there's a race with discard.
+        */
+       build_virtual_key(tc->td, block, &key);
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+               return DM_MAPIO_SUBMITTED;
+
        r = dm_thin_find_block(td, block, 0, &result);
 
        /*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                         * shared flag will be set in their case.
                         */
                        thin_defer_bio(tc, bio);
+                       cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }
 
-               build_virtual_key(tc->td, block, &key);
-               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
-                       return DM_MAPIO_SUBMITTED;
-
                build_data_key(tc->td, result.block, &key);
                if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
                        cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                         * of doing so.
                         */
                        handle_unserviceable_bio(tc->pool, bio);
+                       cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }
                /* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 * provide the hint to load the metadata into cache.
                 */
                thin_defer_bio(tc, bio);
+               cell_defer_no_holder_no_free(tc, &cell1);
                return DM_MAPIO_SUBMITTED;
 
        default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 * pool is switched to fail-io mode.
                 */
                bio_io_error(bio);
+               cell_defer_no_holder_no_free(tc, &cell1);
                return DM_MAPIO_SUBMITTED;
        }
 }
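
Taking the virtual cell before dm_thin_find_block() closes the race with discard: once the cell is held, the lookup result cannot go stale underneath us. The price is bookkeeping: since the cell is now taken unconditionally, every early return has to drop it, which is what the added cell_defer_no_holder_no_free() calls do. The shape of the pattern, with hypothetical helper names:

    if (take_cell(prison, &key, bio, &cell))        /* lock first */
            return DM_MAPIO_SUBMITTED;              /* already held elsewhere */

    r = lookup_block(td, block, &result);           /* can't go stale now */
    switch (r) {
    case 0:
            remap_bio(bio, result.block);
            release_cell(&cell);
            return DM_MAPIO_REMAPPED;
    default:
            release_cell(&cell);                    /* every exit drops it */
            return DM_MAPIO_SUBMITTED;
    }
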
index 4dfa15da9cb8344da4aae1fd28a78f05d84d8563..9233c71138f18a62a4c175d7acf9c3b8fd03a3d5 100644 (file)
@@ -5121,6 +5121,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                printk("md: %s still in use.\n",mdname(mddev));
                if (did_freeze) {
                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        md_wakeup_thread(mddev->thread);
                }
                err = -EBUSY;
@@ -5135,6 +5136,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                mddev->ro = 1;
                set_disk_ro(mddev->gendisk, 1);
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               md_wakeup_thread(mddev->thread);
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                err = 0;
        }
@@ -5178,6 +5181,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
                mutex_unlock(&mddev->open_mutex);
                if (did_freeze) {
                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        md_wakeup_thread(mddev->thread);
                }
                return -EBUSY;
index 37d367bb9aa8976653d5bbc80b29d0dfbf037372..bf2b80d5c4707a64210b5e57deb785069dc7d921 100644 (file)
@@ -42,6 +42,12 @@ struct btree_node {
 } __packed;
 
 
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+                struct dm_block **result);
+
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt);
 
index cf9fd676ae444ad29f8fb6f6ef58d85706438092..1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff 100644 (file)
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
 
 /*----------------------------------------------------------------*/
 
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
                 struct dm_block **result)
 {
        return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
index 416060c2570981d5035376fc02b100f3b6430e5b..200ac12a1d407b5c648c7271a995a022ac8e7efc 100644 (file)
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
  * FIXME: We shouldn't use a recursive algorithm when we have limited stack
  * space.  Also this only works for single level trees.
  */
-static int walk_node(struct ro_spine *s, dm_block_t block,
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
                     int (*fn)(void *context, uint64_t *keys, void *leaf),
                     void *context)
 {
        int r;
        unsigned i, nr;
+       struct dm_block *node;
        struct btree_node *n;
        uint64_t keys;
 
-       r = ro_step(s, block);
-       n = ro_node(s);
+       r = bn_read_lock(info, block, &node);
+       if (r)
+               return r;
+
+       n = dm_block_data(node);
 
        nr = le32_to_cpu(n->header.nr_entries);
        for (i = 0; i < nr; i++) {
                if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
-                       r = walk_node(s, value64(n, i), fn, context);
+                       r = walk_node(info, value64(n, i), fn, context);
                        if (r)
                                goto out;
                } else {
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
        }
 
 out:
-       ro_pop(s);
+       dm_tm_unlock(info->tm, node);
        return r;
 }
 
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
                  int (*fn)(void *context, uint64_t *keys, void *leaf),
                  void *context)
 {
-       int r;
-       struct ro_spine spine;
-
        BUG_ON(info->levels > 1);
-
-       init_ro_spine(&spine, info);
-       r = walk_node(&spine, root, fn, context);
-       exit_ro_spine(&spine);
-
-       return r;
+       return walk_node(info, root, fn, context);
 }
 EXPORT_SYMBOL_GPL(dm_btree_walk);
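
The ro_spine is built for the rolling-lock scheme and silently unlocks blocks two steps up the call chain, which is wrong for a recursive walk that retraces its steps; bn_read_lock() lets each recursion level pin exactly the node it visits and release it on the way out. A generic sketch (node layout and lock helpers are hypothetical):

    #include <stdint.h>

    struct node { int nr_entries, is_internal; uint64_t child[64]; };
    struct info;

    int read_lock_block(struct info *info, uint64_t b, struct node **n);
    void unlock_block(struct info *info, struct node *n);

    static int walk(struct info *info, uint64_t block,
                    int (*fn)(void *ctx, uint64_t leaf), void *ctx)
    {
            struct node *n;
            int i, r = read_lock_block(info, block, &n);

            if (r)
                    return r;
            for (i = 0; i < n->nr_entries && !r; i++)
                    r = n->is_internal ? walk(info, n->child[i], fn, ctx)
                                       : fn(ctx, n->child[i]);
            unlock_block(info, n);          /* released on every path */
            return r;
    }
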
index b8579ee68bd603567fa8a8827b279683f00a7928..2cf30576bf39cca0b4ec889ca22705d0f2a185aa 100644 (file)
@@ -962,6 +962,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
        case SYS_ATSC:
                c->modulation = VSB_8;
                break;
+       case SYS_ISDBS:
+               c->symbol_rate = 28860000;
+               c->rolloff = ROLLOFF_35;
+               c->bandwidth_hz = c->symbol_rate / 100 * 135;
+               break;
        default:
                c->modulation = QAM_AUTO;
                break;
@@ -2072,6 +2077,7 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
                break;
        case SYS_DVBS:
        case SYS_TURBO:
+       case SYS_ISDBS:
                rolloff = 135;
                break;
        case SYS_DVBS2:
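
The ISDB-S defaults mirror the system's fixed parameters: 28.860 Msym/s with a 35% roll-off, so the occupied bandwidth is symbol_rate x 1.35. Worked through: 28860000 / 100 = 288600, times 135 gives 38961000 Hz, about 38.96 MHz; dividing before multiplying also keeps the intermediate product well clear of the top of the 32-bit range.
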
index 335daeff91b941d97854d74292435f977e3cc2a5..9d0d0347758f66b178f49c3db5387ba678fa30aa 100644 (file)
@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
        memcpy(&state->frontend.ops, &ds3000_ops,
                        sizeof(struct dvb_frontend_ops));
        state->frontend.demodulator_priv = state;
+
+       /*
+        * Some devices like the T480 start with voltage on. Be sure

+        * to turn voltage off during init, as this can otherwise
+        * interfere with Unicable SCR systems.
+        */
+       ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
        return &state->frontend;
 
 error3:
index 9b684d5c8f91971edc28f0ddeebdafbc0225da50..15bf4318cb744408683ca5f69189e6d1e5a38307 100644 (file)
@@ -266,7 +266,7 @@ int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
        return s->status;
 }
 
-int sp2_init(struct sp2 *s)
+static int sp2_init(struct sp2 *s)
 {
        int ret = 0;
        u8 buf;
@@ -348,7 +348,7 @@ err:
        return ret;
 }
 
-int sp2_exit(struct i2c_client *client)
+static int sp2_exit(struct i2c_client *client)
 {
        struct sp2 *s;
 
index d9905fb52f847132e5baa1fa530bad11afb99693..b35d65c9cc059c3ee4ba4482c9cd629be07bd1b3 100644 (file)
@@ -216,32 +216,30 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe)
        c->delivery_system = SYS_ISDBS;
 
        layers = 0;
-       ret = reg_read(state, 0xe8, val, 3);
+       ret = reg_read(state, 0xe6, val, 5);
        if (ret == 0) {
-               int slots;
                u8 v;
 
+               c->stream_id = val[0] << 8 | val[1];
+
                /* high/single layer */
-               v = (val[0] & 0x70) >> 4;
+               v = (val[2] & 0x70) >> 4;
                c->modulation = (v == 7) ? PSK_8 : QPSK;
                c->fec_inner = fec_conv_sat[v];
                c->layer[0].fec = c->fec_inner;
                c->layer[0].modulation = c->modulation;
-               c->layer[0].segment_count = val[1] & 0x3f; /* slots */
+               c->layer[0].segment_count = val[3] & 0x3f; /* slots */
 
                /* low layer */
-               v = (val[0] & 0x07);
+               v = (val[2] & 0x07);
                c->layer[1].fec = fec_conv_sat[v];
                if (v == 0)  /* no low layer */
                        c->layer[1].segment_count = 0;
                else
-                       c->layer[1].segment_count = val[2] & 0x3f; /* slots */
+                       c->layer[1].segment_count = val[4] & 0x3f; /* slots */
                /* actually, BPSK if v==1, but not defined in fe_modulation_t */
                c->layer[1].modulation = QPSK;
                layers = (v > 0) ? 2 : 1;
-
-               slots =  c->layer[0].segment_count +  c->layer[1].segment_count;
-               c->symbol_rate = 28860000 * slots / 48;
        }
 
        /* statistics */
@@ -363,7 +361,7 @@ static int tc90522t_get_frontend(struct dvb_frontend *fe)
                u8 v;
 
                c->isdbt_partial_reception = val[0] & 0x01;
-               c->isdbt_sb_mode = (val[0] & 0xc0) == 0x01;
+               c->isdbt_sb_mode = (val[0] & 0xc0) == 0x40;
 
                /* layer A */
                v = (val[2] & 0x78) >> 3;
index 932ed9be9ff3d756fb35948e8183e09faaa60c34..b10aaeda2bb4599576849a8a5b786dcc2542aa49 100644 (file)
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
                ret = smiapp_set_compose(subdev, fh, sel);
                break;
        default:
-               BUG();
+               ret = -EINVAL;
        }
 
        mutex_unlock(&sensor->mutex);
index 331eddac7222ae7f62fb757148f90c8881b5ef4c..3bd386c371f74ab260b00cac9dcfaa9eadecc962 100644 (file)
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
        for (line = 0; line < lines; line++) {
                while (offset && offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
-                       sg++;
+                       sg = sg_next(sg);
                }
 
                if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
                        *(rp++) = cpu_to_le32(0); /* bits 63-32 */
                        todo -= (sg_dma_len(sg)-offset);
                        offset = 0;
-                       sg++;
+                       sg = sg_next(sg);
                        while (todo > sg_dma_len(sg)) {
                                *(rp++) = cpu_to_le32(RISC_WRITE|
                                                    sg_dma_len(sg));
                                *(rp++) = cpu_to_le32(sg_dma_address(sg));
                                *(rp++) = cpu_to_le32(0); /* bits 63-32 */
                                todo -= sg_dma_len(sg);
-                               sg++;
+                               sg = sg_next(sg);
                        }
                        *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
                        *(rp++) = cpu_to_le32(sg_dma_address(sg));
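
The three sg_next() substitutions fix scatterlist traversal: once lists are chained, the entries are no longer one flat array, so a bare sg++ can step onto a chain-link entry or off the end of a table; sg_next() follows the chain links. The usual idiom (sglist and nents as in the function above):

    struct scatterlist *sg;
    unsigned int total = 0;
    int i;

    for_each_sg(sglist, sg, nents, i)       /* advances via sg_next() */
            total += sg_dma_len(sg);
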
index 172583d736fef31c30ab7338405e3d221d17211b..8cbe6b49f4c238de365b0a231eef8f4dff2425ff 100644 (file)
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
        if (!status)
                return IRQ_NONE;
 
-       if (status & ~solo_dev->irq_mask) {
-               solo_reg_write(solo_dev, SOLO_IRQ_STAT,
-                              status & ~solo_dev->irq_mask);
-               status &= solo_dev->irq_mask;
-       }
+       /* Acknowledge all interrupts immediately */
+       solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
 
        if (status & SOLO_IRQ_PCI_ERR)
                solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
        if (status & SOLO_IRQ_G723)
                solo_g723_isr(solo_dev);
 
-       /* Clear all interrupts handled */
-       solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
-
        return IRQ_HANDLED;
 }
 
index 2c61a62ab48b9845a0c4a16df29e40736762df40..686c3c2ad05bcdb1fb9e28246f78cccdef514872 100644 (file)
@@ -100,11 +100,9 @@ MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n"
                           "\t\t    bit 0=crop, 1=compose, 2=scale,\n"
                           "\t\t    -1=user-controlled (default)");
 
-static unsigned multiplanar[VIVID_MAX_DEVS];
+static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 };
 module_param_array(multiplanar, uint, NULL, 0444);
-MODULE_PARM_DESC(multiplanar, " 0 (default) is alternating single and multiplanar devices,\n"
-                             "\t\t    1 is single planar devices,\n"
-                             "\t\t    2 is multiplanar devices");
+MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
 
 /* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
 static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
@@ -669,10 +667,7 @@ static int __init vivid_create_instance(int inst)
        /* start detecting feature set */
 
        /* do we use single- or multi-planar? */
-       if (multiplanar[inst] == 0)
-               dev->multiplanar = inst & 1;
-       else
-               dev->multiplanar = multiplanar[inst] > 1;
+       dev->multiplanar = multiplanar[inst] > 1;
        v4l2_info(&dev->v4l2_dev, "using %splanar format API\n",
                        dev->multiplanar ? "multi" : "single ");
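
The new default uses GCC's range-designator extension, which fills a whole span of array elements in the initializer; anything outside the range still defaults to zero:

    static int v[5] = { [0 ... 3] = 7 };    /* {7, 7, 7, 7, 0} */
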
 
index b8837dd39bb2a52740a36765d69d75af464f51fa..65f80b8b9f7ab922b39f3e02d8b878c097bae1fb 100644 (file)
@@ -1678,7 +1678,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
                if (press_type == 0)
                        rc_keyup(ictx->rdev);
                else {
-                       if (ictx->rc_type == RC_BIT_RC6_MCE)
+                       if (ictx->rc_type == RC_BIT_RC6_MCE ||
+                           ictx->rc_type == RC_BIT_OTHER)
                                rc_keydown(ictx->rdev,
                                           ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER,
                                           ictx->rc_scancode, ictx->rc_toggle);
index 08bbd4f508cd283b54167a2010366b00d5c2ddc0..b0df62961c1403c4acd439c46254a15b3981cac7 100644 (file)
@@ -297,7 +297,7 @@ static int hix5hd2_ir_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int hix5hd2_ir_suspend(struct device *dev)
 {
        struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
index 2ef763928ca440076951e9658174ec88e6ba1833..84fa6e9b59a1acd9360364fbd30ca0a4e00d3a56 100644 (file)
@@ -53,7 +53,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
        u32 scancode;
        enum rc_type protocol;
 
-       if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X)))
+       if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ)))
                return 0;
 
        if (!is_timing_event(ev)) {
index f1f098e22f7ea9f19b458223f226d5e73077bb2d..d16bc67af732251998fb280b86d887835cf39e1b 100644 (file)
@@ -259,8 +259,8 @@ again:
                        case 32:
                                if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
                                        protocol = RC_TYPE_RC6_MCE;
-                                       scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
                                        toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+                                       scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
                                } else {
                                        protocol = RC_BIT_RC6_6A_32;
                                        toggle = 0;
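
The swapped lines fix an order-of-operations bug: the toggle bit was being cleared from the scancode before it was read, so toggle always came out 0. Worked with the driver's MCE toggle mask (0x8000) and an example scancode:

    u32 scancode = 0x800f8415;              /* example value */
    int toggle   = !!(scancode & 0x8000);   /* 1: read first */
    scancode    &= ~0x8000;                 /* then clear: 0x800f0415 */
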
index e8fff2add265ca303c70abf605357ec7836439f1..b732ac6a26d8065cc26ecb3a648ced999db71367 100644 (file)
@@ -262,7 +262,6 @@ int ir_raw_event_register(struct rc_dev *dev)
                return -ENOMEM;
 
        dev->raw->dev = dev;
-       dev->enabled_protocols = ~0;
        dev->change_protocol = change_protocol;
        rc = kfifo_alloc(&dev->raw->kfifo,
                         sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
index a7991c7d010af0bc487db2fb6a8ca94bb08723ad..8d3b74c5a71766b5c79eceecc6b68d9a9b93ff0f 100644 (file)
@@ -1421,6 +1421,8 @@ int rc_register_device(struct rc_dev *dev)
 
        if (dev->change_protocol) {
                u64 rc_type = (1 << rc_map->rc_type);
+               if (dev->driver_type == RC_DRIVER_IR_RAW)
+                       rc_type |= RC_BIT_LIRC;
                rc = dev->change_protocol(dev, &rc_type);
                if (rc < 0)
                        goto out_raw;
index ccc00099b26144e4bd1c5b99cbfccbf266a50ad7..1c0dbf428a3af83c4460e51615232fd96a2180ab 100644 (file)
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
                        break;
                case V4L2_PIX_FMT_JPEG:
                case V4L2_PIX_FMT_MJPEG:
-                       buf->vb.v4l2_buf.length = jpgsize;
+                       vb2_set_plane_payload(&buf->vb, 0, jpgsize);
                        memcpy(vbuf, tmpbuf, jpgsize);
                        break;
                case V4L2_PIX_FMT_YUV422P:
index cf008f45968c77fad5cf64677adf1adc77c77b46..711773e8e64bdd9ee5835c8ae9d738aecf31eeba 100644 (file)
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_charger;
        }
 
-       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+       ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
                                IRQF_ONESHOT | IRQF_SHARED |
                                IRQF_TRIGGER_FALLING, 0,
                                &max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_muic;
        }
 
+       /* Unmask interrupts from all blocks in interrupt source register */
+       ret = regmap_update_bits(max77693->regmap,
+                               MAX77693_PMIC_REG_INTSRC_MASK,
+                               SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
+       if (ret < 0) {
+               dev_err(max77693->dev,
+                       "Could not unmask interrupts in INTSRC: %d\n",
+                       ret);
+               goto err_intsrc;
+       }
+
        pm_runtime_set_active(max77693->dev);
 
        ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
 
 err_mfd:
        mfd_remove_devices(max77693->dev);
+err_intsrc:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
 err_irq_muic:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
index f2643c221d345ebca0b7a9ff3266443ac1f0801b..30f7ca89a0e68619319f69dcebb12037f6e4af92 100644 (file)
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
        mutex_unlock(&pcr->pcr_mutex);
 }
 
+#ifdef CONFIG_PM
 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
 {
        if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
        if (pcr->ops->force_power_down)
                pcr->ops->force_power_down(pcr, pm_state);
 }
+#endif
 
 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 {
index 2d045f26f193eb0e96ecf32ee1cc1f07b39ca99c..bee0abf82040001664c07e82c646e0e3e5afc259 100644 (file)
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_REG_CHIP_ID          0x80
 #define STMPE24XX_REG_IEGPIOR_LSB      0x18
 #define STMPE24XX_REG_ISGPIOR_MSB      0x19
-#define STMPE24XX_REG_GPMR_LSB         0xA5
+#define STMPE24XX_REG_GPMR_LSB         0xA4
 #define STMPE24XX_REG_GPSR_LSB         0x85
 #define STMPE24XX_REG_GPCR_LSB         0x88
 #define STMPE24XX_REG_GPDR_LSB         0x8B
index cf92a6d1c532ac578d6bd9d042f4d1e8ebe55e7c..50f9091bcd383e86be1149813f92c0c1186a4df2 100644 (file)
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
 #define PWR_DEVSLP             BIT(1)
 #define PWR_DEVOFF             BIT(0)
 
+/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
+#define STARTON_SWBUG          BIT(7)  /* Start on watchdog */
+#define STARTON_VBUS           BIT(5)  /* Start on VBUS */
+#define STARTON_VBAT           BIT(4)  /* Start on battery insert */
+#define STARTON_RTC            BIT(3)  /* Start on RTC */
+#define STARTON_USB            BIT(2)  /* Start on USB host */
+#define STARTON_CHG            BIT(1)  /* Start on charger */
+#define STARTON_PWON           BIT(0)  /* Start on PWRON button */
+
 #define SEQ_OFFSYNC            (1 << 0)
 
 #define PHY_TO_OFF_PM_MASTER(p)                (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
        return 0;
 }
 
+static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
+{
+       u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P2_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
+       u8 val;
+       int i, err;
+
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                              TWL4030_PM_MASTER_KEY_CFG2,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+
+       for (i = 0; i < sizeof(regs); i++) {
+               err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
+                                     &val, regs[i]);
+               if (err)
+                       break;
+               val = (~bitmask & val) | (bitmask & bitvalues);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                                      val, regs[i]);
+               if (err)
+                       break;
+       }
+
+       if (err)
+               pr_err("TWL4030 Register access failed: %i\n", err);
+
+relock:
+       return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                               TWL4030_PM_MASTER_PROTECT_KEY);
+}
+
 /*
  * In master mode, start the power off sequence.
  * After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
 {
        int err;
 
+       /* Disable start on charger or VBUS as it can break poweroff */
+       err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
+       if (err)
+               pr_err("TWL4030 Unable to configure start-up\n");
+
        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
                               TWL4030_PM_MASTER_P1_SW_EVENTS);
        if (err)
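
twl4030_starton_mask_and_set() is a read-modify-write done under the PM protection key: val = (~bitmask & val) | (bitmask & bitvalues) keeps every bit outside the mask and forces the masked bits to the requested values. Worked through for the poweroff call below, where bitmask = STARTON_VBUS | STARTON_CHG = 0x22 and bitvalues = 0:

    static unsigned char mask_and_set(unsigned char val,
                                      unsigned char mask, unsigned char bits)
    {
            return (~mask & val) | (mask & bits);
    }

    /* mask_and_set(0xb6, 0x22, 0x00) == 0x94:
     * VBUS and CHG cleared, all other STARTON bits preserved */
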
index e00f5340ed872089d31998ff39d6bb57842e731a..3c2b8f9e3c84b858df3bce9677dee22e09a8b427 100644 (file)
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
                 version >> 8, version & 0xff,
                 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
 
-       ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
-                               ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+       ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
+                               vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
+                               NULL);
        if (ret != 0) {
                dev_err(&interface->dev, "Failed to add mfd devices to core.");
                goto error;
index 03c53b72a2d6a34bafbea7e1a1bda90299772fa8..270d58a4c43dd0e25ffa66718ca0c0ada9ad5036 100644 (file)
@@ -311,7 +311,8 @@ int mmc_of_parse(struct mmc_host *host)
        struct device_node *np;
        u32 bus_width;
        int len, ret;
-       bool cap_invert, gpio_invert;
+       bool cd_cap_invert, cd_gpio_invert = false;
+       bool ro_cap_invert, ro_gpio_invert = false;
 
        if (!host->parent || !host->parent->of_node)
                return 0;
@@ -359,16 +360,13 @@ int mmc_of_parse(struct mmc_host *host)
        if (of_find_property(np, "non-removable", &len)) {
                host->caps |= MMC_CAP_NONREMOVABLE;
        } else {
-               if (of_property_read_bool(np, "cd-inverted"))
-                       cap_invert = true;
-               else
-                       cap_invert = false;
+               cd_cap_invert = of_property_read_bool(np, "cd-inverted");
 
                if (of_find_property(np, "broken-cd", &len))
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-                                          0, &gpio_invert);
+                                          0, &cd_gpio_invert);
                if (ret) {
                        if (ret == -EPROBE_DEFER)
                                return ret;
@@ -391,17 +389,14 @@ int mmc_of_parse(struct mmc_host *host)
                 * both inverted, the end result is that the CD line is
                 * not inverted.
                 */
-               if (cap_invert ^ gpio_invert)
+               if (cd_cap_invert ^ cd_gpio_invert)
                        host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
        }
 
        /* Parse Write Protection */
-       if (of_property_read_bool(np, "wp-inverted"))
-               cap_invert = true;
-       else
-               cap_invert = false;
+       ro_cap_invert = of_property_read_bool(np, "wp-inverted");
 
-       ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &gpio_invert);
+       ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
        if (ret) {
                if (ret == -EPROBE_DEFER)
                        goto out;
@@ -414,7 +409,7 @@ int mmc_of_parse(struct mmc_host *host)
                dev_info(host->parent, "Got WP GPIO\n");
 
        /* See the comment on CD inversion above */
-       if (cap_invert ^ gpio_invert)
+       if (ro_cap_invert ^ ro_gpio_invert)
                host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
        if (of_find_property(np, "cap-sd-highspeed", &len))
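
Splitting cap_invert/gpio_invert into cd_* and ro_* pairs (and initializing the gpio flags to false) stops the write-protect parse from inheriting stale state from the card-detect parse when a GPIO request fails. The XOR itself encodes "two inversions cancel": the line is treated as active-high only when exactly one of the DT flag and the GPIO's own active-low flag is set.

    /* cd_cap_invert  cd_gpio_invert  ->  polarity
     *       0              0             active-low  (nothing inverted)
     *       0              1             active-high
     *       1              0             active-high
     *       1              1             active-low  (the two cancel) */
    if (cd_cap_invert ^ cd_gpio_invert)
            host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
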
index c9ac06cfe6b7b3a8f62568b70a6ad6d7ca9b44d0..a5115fb7cf331b9c39ed19a0b53d1ff527739295 100644 (file)
@@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                        bond_slave_state_change(bond);
                        if (BOND_MODE(bond) == BOND_MODE_XOR)
                                bond_update_slave_arr(bond, NULL);
-               } else if (do_failover) {
+               }
+               if (do_failover) {
                        block_netpoll_tx();
                        bond_select_active_slave(bond);
                        unblock_netpoll_tx();
index c13d83e15ace440fc624ff6913d617fdabd2699f..45f09a66e6c96662febe3015fad41b4e5e6251bf 100644 (file)
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
 
                bond_option_arp_ip_targets_clear(bond);
                nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
-                       __be32 target = nla_get_be32(attr);
+                       __be32 target;
+
+                       if (nla_len(attr) < sizeof(target))
+                               return -EINVAL;
+
+                       target = nla_get_be32(attr);
 
                        bond_opt_initval(&newval, (__force u64)target);
                        err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
index 02492d241e4c9e8cb8d49ae319067c7c0aa1f9cf..2cfe5012e4e58c3c4c51b968fba5c9c029e0db44 100644 (file)
@@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
        long rate;
        u64 v64;
 
-       /* Use CIA recommended sample points */
+       /* Use CiA recommended sample points */
        if (bt->sample_point) {
                sampl_pt = bt->sample_point;
        } else {
@@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
        BUG_ON(idx >= priv->echo_skb_max);
 
        if (priv->echo_skb[idx]) {
-               kfree_skb(priv->echo_skb[idx]);
+               dev_kfree_skb_any(priv->echo_skb[idx]);
                priv->echo_skb[idx] = NULL;
        }
 }
index fca5482c09acd43237f85e69765d3e66b96a4b86..04f20dd39007a105e329cb0168732cd8e909fd5d 100644 (file)
@@ -1,4 +1,5 @@
 config CAN_M_CAN
+       depends on HAS_IOMEM
        tristate "Bosch M_CAN devices"
        ---help---
          Say Y here if you want to support for Bosch M_CAN controller.
index 10d571eaed856814928151da898bd2053d4d899c..d7bc462aafdc27a26774aa6cb6cadb0e0d8547a3 100644 (file)
@@ -105,14 +105,36 @@ enum m_can_mram_cfg {
        MRAM_CFG_NUM,
 };
 
+/* Fast Bit Timing & Prescaler Register (FBTP) */
+#define FBTR_FBRP_MASK         0x1f
+#define FBTR_FBRP_SHIFT                16
+#define FBTR_FTSEG1_SHIFT      8
+#define FBTR_FTSEG1_MASK       (0xf << FBTR_FTSEG1_SHIFT)
+#define FBTR_FTSEG2_SHIFT      4
+#define FBTR_FTSEG2_MASK       (0x7 << FBTR_FTSEG2_SHIFT)
+#define FBTR_FSJW_SHIFT                0
+#define FBTR_FSJW_MASK         0x3
+
 /* Test Register (TEST) */
 #define TEST_LBCK      BIT(4)
 
 /* CC Control Register(CCCR) */
-#define CCCR_TEST      BIT(7)
-#define CCCR_MON       BIT(5)
-#define CCCR_CCE       BIT(1)
-#define CCCR_INIT      BIT(0)
+#define CCCR_TEST              BIT(7)
+#define CCCR_CMR_MASK          0x3
+#define CCCR_CMR_SHIFT         10
+#define CCCR_CMR_CANFD         0x1
+#define CCCR_CMR_CANFD_BRS     0x2
+#define CCCR_CMR_CAN           0x3
+#define CCCR_CME_MASK          0x3
+#define CCCR_CME_SHIFT         8
+#define CCCR_CME_CAN           0
+#define CCCR_CME_CANFD         0x1
+#define CCCR_CME_CANFD_BRS     0x2
+#define CCCR_TEST              BIT(7)
+#define CCCR_MON               BIT(5)
+#define CCCR_CCE               BIT(1)
+#define CCCR_INIT              BIT(0)
+#define CCCR_CANFD             0x10
 
 /* Bit Timing & Prescaler Register (BTP) */
 #define BTR_BRP_MASK           0x3ff
@@ -204,6 +226,7 @@ enum m_can_mram_cfg {
 
 /* Rx Buffer / FIFO Element Size Configuration (RXESC) */
 #define M_CAN_RXESC_8BYTES     0x0
+#define M_CAN_RXESC_64BYTES    0x777
 
 /* Tx Buffer Configuration(TXBC) */
 #define TXBC_NDTB_OFF          16
@@ -211,6 +234,7 @@ enum m_can_mram_cfg {
 
 /* Tx Buffer Element Size Configuration(TXESC) */
 #define TXESC_TBDS_8BYTES      0x0
+#define TXESC_TBDS_64BYTES     0x7
 
 /* Tx Event FIFO Configuration (TXEFC) */
 #define TXEFC_EFS_OFF          16
@@ -219,11 +243,11 @@ enum m_can_mram_cfg {
 /* Message RAM Configuration (in bytes) */
 #define SIDF_ELEMENT_SIZE      4
 #define XIDF_ELEMENT_SIZE      8
-#define RXF0_ELEMENT_SIZE      16
-#define RXF1_ELEMENT_SIZE      16
+#define RXF0_ELEMENT_SIZE      72
+#define RXF1_ELEMENT_SIZE      72
 #define RXB_ELEMENT_SIZE       16
 #define TXE_ELEMENT_SIZE       8
-#define TXB_ELEMENT_SIZE       16
+#define TXB_ELEMENT_SIZE       72
 
 /* Message RAM Elements */
 #define M_CAN_FIFO_ID          0x0
@@ -231,11 +255,17 @@ enum m_can_mram_cfg {
 #define M_CAN_FIFO_DATA(n)     (0x8 + ((n) << 2))
 
 /* Rx Buffer Element */
+/* R0 */
 #define RX_BUF_ESI             BIT(31)
 #define RX_BUF_XTD             BIT(30)
 #define RX_BUF_RTR             BIT(29)
+/* R1 */
+#define RX_BUF_ANMF            BIT(31)
+#define RX_BUF_EDL             BIT(21)
+#define RX_BUF_BRS             BIT(20)
 
 /* Tx Buffer Element */
+/* T0 */
 #define TX_BUF_XTD             BIT(30)
 #define TX_BUF_RTR             BIT(29)
 
@@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
        if (enable) {
                /* enable m_can configuration */
                m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+               udelay(5);
                /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
                m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
        } else {
@@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
        m_can_write(priv, M_CAN_ILE, 0x0);
 }
 
-static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf,
-                           u32 rxfs)
+static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
 {
+       struct net_device_stats *stats = &dev->stats;
        struct m_can_priv *priv = netdev_priv(dev);
-       u32 id, fgi;
+       struct canfd_frame *cf;
+       struct sk_buff *skb;
+       u32 id, fgi, dlc;
+       int i;
 
        /* calculate the fifo get index for where to read data */
        fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
+       dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+       if (dlc & RX_BUF_EDL)
+               skb = alloc_canfd_skb(dev, &cf);
+       else
+               skb = alloc_can_skb(dev, (struct can_frame **)&cf);
+       if (!skb) {
+               stats->rx_dropped++;
+               return;
+       }
+
+       if (dlc & RX_BUF_EDL)
+               cf->len = can_dlc2len((dlc >> 16) & 0x0F);
+       else
+               cf->len = get_can_dlc((dlc >> 16) & 0x0F);
+
        id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
        if (id & RX_BUF_XTD)
                cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
        else
                cf->can_id = (id >> 18) & CAN_SFF_MASK;
 
-       if (id & RX_BUF_RTR) {
+       if (id & RX_BUF_ESI) {
+               cf->flags |= CANFD_ESI;
+               netdev_dbg(dev, "ESI Error\n");
+       }
+
+       if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
                cf->can_id |= CAN_RTR_FLAG;
        } else {
-               id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
-               cf->can_dlc = get_can_dlc((id >> 16) & 0x0F);
-               *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi,
-                                                        M_CAN_FIFO_DATA(0));
-               *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi,
-                                                        M_CAN_FIFO_DATA(1));
+               if (dlc & RX_BUF_BRS)
+                       cf->flags |= CANFD_BRS;
+
+               for (i = 0; i < cf->len; i += 4)
+                       *(u32 *)(cf->data + i) =
+                               m_can_fifo_read(priv, fgi,
+                                               M_CAN_FIFO_DATA(i / 4));
        }
 
        /* acknowledge rx fifo 0 */
        m_can_write(priv, M_CAN_RXF0A, fgi);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->len;
+
+       netif_receive_skb(skb);
 }
 
 static int m_can_do_rx_poll(struct net_device *dev, int quota)
 {
        struct m_can_priv *priv = netdev_priv(dev);
-       struct net_device_stats *stats = &dev->stats;
-       struct sk_buff *skb;
-       struct can_frame *frame;
        u32 pkts = 0;
        u32 rxfs;
 
@@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
                if (rxfs & RXFS_RFL)
                        netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
 
-               skb = alloc_can_skb(dev, &frame);
-               if (!skb) {
-                       stats->rx_dropped++;
-                       return pkts;
-               }
-
-               m_can_read_fifo(dev, frame, rxfs);
-
-               stats->rx_packets++;
-               stats->rx_bytes += frame->can_dlc;
-
-               netif_receive_skb(skb);
+               m_can_read_fifo(dev, rxfs);
 
                quota--;
                pkts++;
@@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev,
        return 1;
 }
 
+static int __m_can_get_berr_counter(const struct net_device *dev,
+                                   struct can_berr_counter *bec)
+{
+       struct m_can_priv *priv = netdev_priv(dev);
+       unsigned int ecr;
+
+       ecr = m_can_read(priv, M_CAN_ECR);
+       bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
+       bec->txerr = ecr & ECR_TEC_MASK;
+
+       return 0;
+}
+
 static int m_can_get_berr_counter(const struct net_device *dev,
                                  struct can_berr_counter *bec)
 {
        struct m_can_priv *priv = netdev_priv(dev);
-       unsigned int ecr;
        int err;
 
        err = clk_prepare_enable(priv->hclk);
@@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev,
                return err;
        }
 
-       ecr = m_can_read(priv, M_CAN_ECR);
-       bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
-       bec->txerr = ecr & ECR_TEC_MASK;
+       __m_can_get_berr_counter(dev, bec);
 
        clk_disable_unprepare(priv->cclk);
        clk_disable_unprepare(priv->hclk);
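
The berr counter read is split on the locked/unlocked helper convention: the double-underscore variant assumes its prerequisite (clocks enabled) already holds, so the state-change path, which runs with the clocks on and cannot go through the potentially sleeping clock API, calls it directly, while the plain wrapper establishes and tears down the prerequisite. A sketch with illustrative names and register layout:

    #include <linux/clk.h>
    #include <linux/io.h>

    struct priv { void __iomem *base; struct clk *clk; };
    struct counters { unsigned int rxerr, txerr; };
    #define ECR_OFF 0x40    /* illustrative offset */

    /* __read_counters(): caller guarantees the clocks are enabled */
    static int __read_counters(struct priv *p, struct counters *c)
    {
            unsigned int ecr = readl(p->base + ECR_OFF);

            c->rxerr = (ecr >> 8) & 0x7f;
            c->txerr = ecr & 0xff;
            return 0;
    }

    static int read_counters(struct priv *p, struct counters *c)
    {
            int err = clk_prepare_enable(p->clk);

            if (err)
                    return err;
            err = __read_counters(p, c);
            clk_disable_unprepare(p->clk);
            return err;
    }
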
@@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev,
        if (unlikely(!skb))
                return 0;
 
-       m_can_get_berr_counter(dev, &bec);
+       __m_can_get_berr_counter(dev, &bec);
 
        switch (new_state) {
        case CAN_STATE_ERROR_ACTIVE:
@@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
 
        if ((psr & PSR_EP) &&
            (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
-               netdev_dbg(dev, "entered error warning state\n");
+               netdev_dbg(dev, "entered error passive state\n");
                work_done += m_can_handle_state_change(dev,
                                                       CAN_STATE_ERROR_PASSIVE);
        }
 
        if ((psr & PSR_BO) &&
            (priv->can.state != CAN_STATE_BUS_OFF)) {
-               netdev_dbg(dev, "entered error warning state\n");
+               netdev_dbg(dev, "entered error bus off state\n");
                work_done += m_can_handle_state_change(dev,
                                                       CAN_STATE_BUS_OFF);
        }
@@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
 {
        if (irqstatus & IR_WDI)
                netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
-       if (irqstatus & IR_BEU)
+       if (irqstatus & IR_ELO)
                netdev_err(dev, "Error Logging Overflow\n");
        if (irqstatus & IR_BEU)
                netdev_err(dev, "Bit Error Uncorrected\n");
@@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = {
        .brp_inc = 1,
 };
 
+static const struct can_bittiming_const m_can_data_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 32,
+       .brp_inc = 1,
+};
+
 static int m_can_set_bittiming(struct net_device *dev)
 {
        struct m_can_priv *priv = netdev_priv(dev);
        const struct can_bittiming *bt = &priv->can.bittiming;
+       const struct can_bittiming *dbt = &priv->can.data_bittiming;
        u16 brp, sjw, tseg1, tseg2;
        u32 reg_btp;
 
@@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev)
        reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
                        (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
        m_can_write(priv, M_CAN_BTP, reg_btp);
-       netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+               brp = dbt->brp - 1;
+               sjw = dbt->sjw - 1;
+               tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+               tseg2 = dbt->phase_seg2 - 1;
+               reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
+                               (tseg1 << FBTR_FTSEG1_SHIFT) |
+                               (tseg2 << FBTR_FTSEG2_SHIFT);
+               m_can_write(priv, M_CAN_FBTP, reg_btp);
+       }
 
        return 0;
 }
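To make the FBTP packing concrete, here is the arithmetic for one hypothetical data-phase configuration (values chosen for illustration, not taken from the commit; each field is programmed as "value - 1"):

/* Hypothetical data bittiming: brp = 1, sjw = 1,
 * prop_seg + phase_seg1 = 11, phase_seg2 = 4.
 */
brp   = 1 - 1;		/* 0  */
sjw   = 1 - 1;		/* 0  */
tseg1 = 11 - 1;		/* 10 */
tseg2 = 4 - 1;		/* 3  */
reg_btp = (0 << FBTR_FBRP_SHIFT) | (0 << FBTR_FSJW_SHIFT) |
	  (10 << FBTR_FTSEG1_SHIFT) | (3 << FBTR_FTSEG2_SHIFT);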
@@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev)
 
        m_can_config_endisable(priv, true);
 
-       /* RX Buffer/FIFO Element Size 8 bytes data field */
-       m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES);
+       /* RX Buffer/FIFO Element Size 64 bytes data field */
+       m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
 
        /* Accept Non-matching Frames Into FIFO 0 */
        m_can_write(priv, M_CAN_GFC, 0x0);
@@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev)
        m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
                    priv->mcfg[MRAM_TXB].off);
 
-       /* only support 8 bytes firstly */
-       m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES);
+       /* support 64 bytes payload */
+       m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
 
        m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
                    priv->mcfg[MRAM_TXE].off);
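Switching RXESC/TXESC to 64-byte data fields grows every Message RAM buffer element from 4 words (two header words plus 8 bytes of data) to 18 words. Roughly, per the Bosch M_CAN manual (field placement worth verifying against the core revision in use):

/* Approximate Tx Buffer element layout with a 64-byte data field:
 *
 *   word 0  (T0): ESI/XTD/RTR flags plus the 11- or 29-bit identifier
 *   word 1  (T1): message marker, event FIFO control, EDL, BRS, DLC
 *   words 2..17 : payload, four bytes per word
 *
 * which is why the transmit path below writes M_CAN_FIFO_DATA(i / 4).
 */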
@@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev)
                    RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
 
        cccr = m_can_read(priv, M_CAN_CCCR);
-       cccr &= ~(CCCR_TEST | CCCR_MON);
+       cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
+               (CCCR_CME_MASK << CCCR_CME_SHIFT));
        test = m_can_read(priv, M_CAN_TEST);
        test &= ~TEST_LBCK;
 
@@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev)
                test |= TEST_LBCK;
        }
 
+       if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+               cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+
        m_can_write(priv, M_CAN_CCCR, cccr);
        m_can_write(priv, M_CAN_TEST, test);
 
@@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void)
 
        priv->dev = dev;
        priv->can.bittiming_const = &m_can_bittiming_const;
+       priv->can.data_bittiming_const = &m_can_data_bittiming_const;
        priv->can.do_set_mode = m_can_set_mode;
        priv->can.do_get_berr_counter = m_can_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
                                        CAN_CTRLMODE_LISTENONLY |
-                                       CAN_CTRLMODE_BERR_REPORTING;
+                                       CAN_CTRLMODE_BERR_REPORTING |
+                                       CAN_CTRLMODE_FD;
 
        return dev;
 }
@@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
 {
        struct m_can_priv *priv = netdev_priv(dev);
-       struct can_frame *cf = (struct can_frame *)skb->data;
-       u32 id;
+       struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+       u32 id, cccr;
+       int i;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
@@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
 
        /* message ram configuration */
        m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
-       m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16);
-       m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0));
-       m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4));
+       m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);
+
+       for (i = 0; i < cf->len; i += 4)
+               m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
+                                *(u32 *)(cf->data + i));
+
        can_put_echo_skb(skb, dev, 0);
 
+       if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+               cccr = m_can_read(priv, M_CAN_CCCR);
+               cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+               if (can_is_canfd_skb(skb)) {
+                       if (cf->flags & CANFD_BRS)
+                               cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
+                       else
+                               cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
+               } else {
+                       cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+               }
+               m_can_write(priv, M_CAN_CCCR, cccr);
+       }
+
        /* enable first TX buffer to start transfer  */
        m_can_write(priv, M_CAN_TXBTIE, 0x1);
        m_can_write(priv, M_CAN_TXBAR, 0x1);
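The new per-frame CCCR.CMR switching can be exercised from userspace through SocketCAN; a minimal sketch (error handling trimmed, "can0" and the 64-byte payload are assumptions for illustration):

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int send_fd_frame(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct canfd_frame frame = { 0 };
	int enable = 1;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	/* opt in to 72-byte canfd_frame reads/writes on this socket */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

	addr.can_ifindex = if_nametoindex("can0");
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	frame.can_id = 0x123;
	frame.len = 64;			/* full CAN FD payload */
	frame.flags = CANFD_BRS;	/* ask for bit rate switching */
	memset(frame.data, 0xaa, frame.len);

	write(s, &frame, sizeof(frame));	/* CANFD_MTU bytes */
	close(s);
	return 0;
}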
@@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = {
        .ndo_open = m_can_open,
        .ndo_stop = m_can_close,
        .ndo_start_xmit = m_can_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static int register_m_can_dev(struct net_device *dev)
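can_change_mtu() is the generic helper from the CAN device infrastructure; paraphrased (not the verbatim kernel source), it only lets the MTU toggle between CAN_MTU (16, struct can_frame) and CANFD_MTU (72, struct canfd_frame), and only while the interface is down:

int can_change_mtu(struct net_device *dev, int new_mtu)
{
	struct can_priv *priv = netdev_priv(dev);

	/* refuse to resize a running interface */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	switch (new_mtu) {
	case CAN_MTU:
		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
		break;
	case CANFD_MTU:
		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
			return -EINVAL;
		priv->ctrlmode |= CAN_CTRLMODE_FD;
		break;
	default:
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	return 0;
}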
@@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
        struct resource *res;
        void __iomem *addr;
        u32 out_val[MRAM_CFG_LEN];
-       int ret;
+       int i, start, end, ret;
 
        /* message ram could be shared */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
@@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
                priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
                priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
 
+       /* initialize the entire Message RAM in use to avoid possible
+        * ECC/parity checksum errors when reading an uninitialized buffer
+        */
+       start = priv->mcfg[MRAM_SIDF].off;
+       end = priv->mcfg[MRAM_TXB].off +
+               priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+       for (i = start; i < end; i += 4)
+               writel(0x0, priv->mram_base + i);
+
        return 0;
 }
 
index 1abe133d159428e0e098d611e8898bdaabb04c71..9718248e55f1e7c95c748b33538a4fb90a6f8d01 100644 (file)
@@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = {
        .ndo_open = rcar_can_open,
        .ndo_stop = rcar_can_close,
        .ndo_start_xmit = rcar_can_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
index 8ff3424d51472bc1df5753c025c6cd73f9ab1ed0..15c00faeec61001ab02c22689709efb523a96eb0 100644 (file)
@@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
        struct net_device *dev;
        struct sja1000_priv *priv;
        struct kvaser_pci *board;
-       int err, init_step;
+       int err;
 
        dev = alloc_sja1000dev(sizeof(struct kvaser_pci));
        if (dev == NULL)
@@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
        if (channel == 0) {
                board->xilinx_ver =
                        ioread8(board->res_addr + XILINX_VERINT) >> 4;
-               init_step = 2;
 
                /* Assert PTADR# - we're in passive mode so the other bits are
                   not important */
@@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
        priv->irq_flags = IRQF_SHARED;
        dev->irq = pdev->irq;
 
-       init_step = 4;
-
        dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
                 priv->reg_base, board->conf_addr, dev->irq);
 
index 00f2534dde736f1dd8cda9b016bd6a78f79eeab0..29d3f0938eb836b4da53a0898f574ebf42721ae2 100644 (file)
@@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb)
        if (urb->actual_length > CPC_HEADER_SIZE) {
                struct ems_cpc_msg *msg;
                u8 *ibuf = urb->transfer_buffer;
-               u8 msg_count, again, start;
+               u8 msg_count, start;
 
                msg_count = ibuf[0] & ~0x80;
-               again = ibuf[0] & 0x80;
 
                start = CPC_HEADER_SIZE;
 
index b7c9e8b11460a3d6bd0e4fe7836f0e07e73eb586..c063a54ab8dd8a598f36e5a7e712722bb5931df1 100644 (file)
@@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
 {
        struct esd_tx_urb_context *context = urb->context;
        struct esd_usb2_net_priv *priv;
-       struct esd_usb2 *dev;
        struct net_device *netdev;
        size_t size = sizeof(struct esd_usb2_msg);
 
@@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
 
        priv = context->priv;
        netdev = priv->netdev;
-       dev = priv->usb2;
 
        /* free up our allocated buffer */
        usb_free_coherent(urb->dev, size,
@@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
                        }
                }
                unlink_all_urbs(dev);
+               kfree(dev);
        }
 }
 
index 04b0f84612f0cf12fd1cfc59925a60c448423fa2..009acc8641fc557cb580cb688983daf041519e4b 100644 (file)
@@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = {
        .ndo_open = gs_can_open,
        .ndo_stop = gs_can_close,
        .ndo_start_xmit = gs_can_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
index 5e8b5609c067c53c2283a94dacd820fac8f1c565..8a998e3884ce0d502d9ebb19e7abde941ab85d3e 100644 (file)
@@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
 static int xcan_chip_start(struct net_device *ndev)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
-       u32 err, reg_msr, reg_sr_mask;
+       u32 reg_msr, reg_sr_mask;
+       int err;
        unsigned long timeout;
 
        /* Check if it is in reset mode */
@@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = {
        .ndo_open       = xcan_open,
        .ndo_stop       = xcan_close,
        .ndo_start_xmit = xcan_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 /**
index b9625968daacc0eb89c0f7371a3a4e70242f95ce..4f4c2a7888e5d74ee06ae58df8feaf5f1dea3123 100644 (file)
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+       do {
+               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+               if (!(reg & SOFTWARE_RESET))
+                       break;
+
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       if (timeout == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
                *base = of_iomap(dn, i);
                if (*base == NULL) {
                        pr_err("unable to find register: %s\n", reg_names[i]);
-                       return -ENODEV;
+                       ret = -ENOMEM;
+                       goto out_unmap;
                }
                base++;
        }
 
+       ret = bcm_sf2_sw_rst(priv);
+       if (ret) {
+               pr_err("unable to software reset switch: %d\n", ret);
+               goto out_unmap;
+       }
+
        /* Disable all interrupts and request them */
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
 out_unmap:
        base = &priv->core;
        for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
-               iounmap(*base);
+               if (*base)
+                       iounmap(*base);
                base++;
        }
        return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
        return 0;
 }
 
-static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
-{
-       unsigned int timeout = 1000;
-       u32 reg;
-
-       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
-       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
-
-       do {
-               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-               if (!(reg & SOFTWARE_RESET))
-                       break;
-
-               usleep_range(1000, 2000);
-       } while (timeout-- > 0);
-
-       if (timeout == 0)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
index 63ea1941e973b6338aa7d4431ac2b9ef2d2e418b..7ba83ffb08ac73b6437f1fd4a87c3a560b9d4f84 100644 (file)
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+       if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+               return false;
+
+       if (ioread32(p->ring_csr_addr + SRST_ADDR))
+               return false;
+
+       return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
        u32 val;
 
+       if (!xgene_ring_mgr_init(pdata))
+               return -ENODEV;
+
        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
        val |= SCAN_AUTO_INCR;
        MGMT_CLOCK_SEL_SET(&val, 1);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+       return 0;
 }
 
 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
index 38558584080ed8029a3640f171353fe09cb6503e..ec45f3256f0e3da2928c8be98ba9abcc0d58fb27 100644 (file)
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
 #define BLOCK_ETH_MAC_OFFSET           0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET       0x2800
 
+#define CLKEN_ADDR                     0xc208
+#define SRST_ADDR                      0xc200
+
 #define MAC_ADDR_REG_OFFSET            0x00
 #define MAC_COMMAND_REG_OFFSET         0x04
 #define MAC_WRITE_REG_OFFSET           0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
index 3c208cc6f6bb470ae97d74f06fbaf0dcbaaf7ee6..123669696184fde824328fb61c9b2649f1c750c9 100644 (file)
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
-       u8 cpu_bufnum = 0, eth_bufnum = 0;
-       u8 bp_bufnum = 0x20;
-       u16 ring_id, ring_num = 0;
+       u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+       u8 bp_bufnum = START_BP_BUFNUM;
+       u16 ring_id, ring_num = START_RING_NUM;
        int ret;
 
        /* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
        u16 dst_ring_num;
        int ret;
 
-       pdata->port_ops->reset(pdata);
+       ret = pdata->port_ops->reset(pdata);
+       if (ret)
+               return ret;
 
        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
        return ret;
 err:
+       unregister_netdev(ndev);
        free_netdev(ndev);
        return ret;
 }
index 874e5a01161fb9e9e1a05eb90afee3b148c07182..f9958fae6ffdc9fcba7cbb8f8438d95024b40ac9 100644 (file)
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE                (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF    64
 #define NUM_BUFPOOL    32
+#define START_ETH_BUFNUM       2
+#define START_BP_BUFNUM                0x22
+#define START_RING_NUM         8
 
 #define PHY_POLL_LINK_ON       (10 * HZ)
 #define PHY_POLL_LINK_OFF      (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };
 
 struct xgene_port_ops {
-       void (*reset)(struct xgene_enet_pdata *pdata);
+       int (*reset)(struct xgene_enet_pdata *pdata);
        void (*cle_bypass)(struct xgene_enet_pdata *pdata,
                           u32 dst_ring_num, u16 bufpool_id);
        void (*shutdown)(struct xgene_enet_pdata *pdata);
index c22f32622fa9a50ad3789d8000991ede58502ca0..f5d4f68c288c395076205ba128788797d7114cdf 100644 (file)
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
        xgene_sgmac_rxtx(p, TX_EN, false);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+       if (!xgene_ring_mgr_init(p))
+               return -ENODEV;
+
        clk_prepare_enable(p->clk);
        clk_disable_unprepare(p->clk);
        clk_prepare_enable(p->clk);
 
        xgene_enet_ecc_init(p);
        xgene_enet_config_ring_if_assoc(p);
+
+       return 0;
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
index 67d07206b3c7723e2287a41fb964860cc4b78d04..a18a9d1f11432d4469d2d711ec633f22565017eb 100644 (file)
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+       if (!xgene_ring_mgr_init(pdata))
+               return -ENODEV;
+
        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);
 
        xgene_enet_ecc_init(pdata);
        xgene_enet_config_ring_if_assoc(pdata);
+
+       return 0;
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
index 3a6778a667f4558f52f1327f413e088882490f7d..531bb7c57531b17fc64b0719690accba8afb6b1e 100644 (file)
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        /* We just need one DMA descriptor which is DMA-able, since writing to
         * the port will allocate a new descriptor in its internal linked-list
         */
-       p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+       p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+                               GFP_KERNEL);
        if (!p) {
                netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
                return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        if (!(reg & TDMA_DISABLED))
                netdev_warn(priv->netdev, "TDMA not stopped!\n");
 
+       /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+        * fail, so by checking this pointer we know whether the TX ring was
+        * fully initialized or not.
+        */
+       if (!ring->cbs)
+               return;
+
        napi_disable(&ring->napi);
        netif_napi_del(&ring->napi);
 
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        ring->cbs = NULL;
 
        if (ring->desc_dma) {
-               dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+               dma_free_coherent(kdev, sizeof(struct dma_desc),
+                                 ring->desc_cpu, ring->desc_dma);
                ring->desc_dma = 0;
        }
        ring->size = 0;
index fdc9ec09e453510636460780226e0349cc51b9a5..da1a2500c91ce43677e7cada4b66196c17af2513 100644 (file)
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
+       /* Re-configure the port multiplexer towards the PHY device */
+       bcmgenet_mii_config(priv->dev, false);
+
+       phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+                          priv->phy_interface);
+
        bcmgenet_netif_start(dev);
 
        return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
 
        bcmgenet_netif_stop(dev);
 
+       /* Really kill the PHY state machine and disconnect from it */
+       phy_disconnect(priv->phydev);
+
        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);
 
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
-       bcmgenet_mii_config(priv->dev);
+       bcmgenet_mii_config(priv->dev, false);
 
        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
index dbf524ea3b19561bc1098b3cd58620cf9d66ac43..31b2da5f9b821342315ddebd958a765e7560cd51 100644 (file)
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
 
 /* Wake-on-LAN routines */
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
index 9ff799a9f8019dafe2d47be33aba738921e588a3..933cd7e7cd33708bf89292a7bb50035165d0e867 100644 (file)
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 /* setup netdev link state when PHY link status change and
  * update UMAC and RGMII block when link up
  */
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
                return -EINVAL;
        }
 
-       dev_info(kdev, "configuring instance for %s\n", phy_name);
+       if (init)
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
 
        return 0;
 }
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
-       ret = bcmgenet_mii_config(dev);
+       ret = bcmgenet_mii_config(dev, true);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
index dbb41c1923e60cf1a152bf414ef3428c2d0f2167..77f8f836cbbe18a75d1ffa58fc61c077414eab5b 100644 (file)
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-               if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+               if (tnapi->prodring.rx_std &&
+                   tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
index 6fe300e316c3c41e398b06f1864cfdc7f1f0ebd6..4fe33606f372755efb2bcef46f28c68b7d763312 100644 (file)
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
                app.protocol = dcb->app_priority[i].protocolid;
 
                if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+                       app.priority = dcb->app_priority[i].user_prio_map;
                        app.selector = dcb->app_priority[i].sel_field + 1;
-                       err = dcb_ieee_setapp(dev, &app);
+                       err = dcb_ieee_delapp(dev, &app);
                } else {
                        app.selector = !!(dcb->app_priority[i].sel_field);
                        err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                case CXGB4_DCB_INPUT_FW_ENABLED: {
                        /* we're going to use Firmware DCB */
                        dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+                       if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+                               dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+                       else
+                               dcb->supported |= DCB_CAP_DCBX_VER_CEE;
                        break;
                }
 
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
        *up_tc_map = (1 << tc);
 
        /* prio_type is link strict */
-       *prio_type = 0x2;
+       if (*pgid != 0xF)
+               *prio_type = 0x2;
 }
 
 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
                                u8 *prio_type, u8 *pgid, u8 *bw_per,
                                u8 *up_tc_map)
 {
-       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+       /* tc 0 is written at MSB position */
+       return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+                               up_tc_map, 1);
 }
 
 
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
                                u8 *prio_type, u8 *pgid, u8 *bw_per,
                                u8 *up_tc_map)
 {
-       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+       /* tc 0 is written at MSB position */
+       return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+                               up_tc_map, 0);
 }
 
 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
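The recurring (7 - tc) conversions all come from the firmware packing the eight traffic classes into one 32-bit PGID word with TC0 in the most significant nibble. A small illustration of the indexing, assuming the layout the comments describe:

/* Firmware PGID word, one 4-bit Priority Group per Traffic Class:
 *
 *   bits 31:28  27:24   ...   3:0
 *        [TC0]  [TC1]   ...  [TC7]
 */
static inline u8 pgid_for_tc(u32 pgid, int tc)
{
	int fw_tc = 7 - tc;	/* host TC number -> firmware nibble */

	return (pgid >> (fw_tc * 4)) & 0xF;
}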
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
        struct fw_port_cmd pcmd;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = pi->adapter;
+       int fw_tc = 7 - tc;
        u32 _pgid;
        int err;
 
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
        }
 
        _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-       _pgid &= ~(0xF << (tc * 4));
-       _pgid |= pgid << (tc * 4);
+       _pgid &= ~(0xF << (fw_tc * 4));
+       _pgid |= pgid << (fw_tc * 4);
        pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
 
        INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
            priority >= CXGB4_MAX_PRIORITY)
                *pfccfg = 0;
        else
-               *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+               *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }
 
 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
        pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
 
        if (pfccfg)
-               pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+               pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
        else
-               pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+               pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
 
        err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
        if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -1071,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
        pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
 
        for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
-               pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF;
+               pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;
 
        INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
        pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
index 8520d5529df872fad60a377231f5154fcf91a4e3..279873cb6e3ac33b196d4689daeefdb7c1bf87f5 100644 (file)
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
-                type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+                type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
                v |= SUPPORTED_FIBRE;
-       else if (type == FW_PORT_TYPE_BP40_BA)
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_BP40_BA)
                v |= SUPPORTED_40000baseSR4_Full;
 
        if (caps & FW_PORT_CAP_ANEG)
index 5e1b314e11af674f8d0f7f1ebf3bed60835dda08..39f2b13e66c731add3b842ef8a8db1fcde906c4b 100644 (file)
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       u32 sge_control, sge_conm_ctrl;
+       u32 sge_control, sge_control2, sge_conm_ctrl;
+       unsigned int ingpadboundary, ingpackboundary;
        int ret, egress_threshold;
 
        /*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
        sge_control = t4_read_reg(adap, SGE_CONTROL);
        s->pktshift = PKTSHIFT_GET(sge_control);
        s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-       s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-                           X_INGPADBOUNDARY_SHIFT);
+
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately.  The actual Ingress Packet Data alignment boundary
+        * within Packed Buffer Mode is the maximum of these two
+        * specifications.
+        */
+       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+                              X_INGPADBOUNDARY_SHIFT);
+       if (is_t4(adap->params.chip)) {
+               s->fl_align = ingpadboundary;
+       } else {
+               /* T5 has a different interpretation of one of the PCIe Packing
+                * Boundary values.
+                */
+               sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+               ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+               if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+                       ingpackboundary = 16;
+               else
+                       ingpackboundary = 1 << (ingpackboundary +
+                                               INGPACKBOUNDARY_SHIFT_X);
+
+               s->fl_align = max(ingpadboundary, ingpackboundary);
+       }
 
        if (adap->flags & USING_SOFT_PARAMS)
                ret = t4_sge_init_soft(adap);
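A worked decode of the above with hypothetical register values: a pad field of 0 gives 1 << (0 + 5) = 32 bytes; on T5 a pack field of 0 is the special 16-byte case, while a pack field of 1 gives 1 << (1 + 5) = 64 bytes, so the alignment ends up as the larger of the two:

/* Hypothetical T5 decode: SGE_CONTROL pad field = 0,
 * SGE_CONTROL2 pack field = 1.
 */
ingpadboundary  = 1 << (0 + X_INGPADBOUNDARY_SHIFT);	/* 32 bytes */
ingpackboundary = 1 << (1 + INGPACKBOUNDARY_SHIFT_X);	/* 64 bytes */
s->fl_align     = max(ingpadboundary, ingpackboundary);	/* 64 bytes */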
index a9d9d74e4f092f969c2d58eaf01455b7bca7e257..163a2a14948cf8e3f5d78cf63588ee3905baa315 100644 (file)
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                     HOSTPAGESIZEPF6(sge_hps) |
                     HOSTPAGESIZEPF7(sge_hps));
 
-       t4_set_reg_field(adap, SGE_CONTROL,
-                        INGPADBOUNDARY_MASK |
-                        EGRSTATUSPAGESIZE_MASK,
-                        INGPADBOUNDARY(fl_align_log - 5) |
-                        EGRSTATUSPAGESIZE(stat_len != 64));
-
+       if (is_t4(adap->params.chip)) {
+               t4_set_reg_field(adap, SGE_CONTROL,
+                                INGPADBOUNDARY_MASK |
+                                EGRSTATUSPAGESIZE_MASK,
+                                INGPADBOUNDARY(fl_align_log - 5) |
+                                EGRSTATUSPAGESIZE(stat_len != 64));
+       } else {
+               /* T5 introduced the separation of the Free List Padding and
+                * Packing Boundaries.  Thus, we can select a smaller Padding
+                * Boundary to avoid uselessly chewing up PCIe Link and Memory
+                * Bandwidth, and use a Packing Boundary which is large enough
+                * to avoid false sharing between CPUs, etc.
+                *
+                * For the PCI Link, the smaller the Padding Boundary the
+                * better.  For the Memory Controller, a smaller Padding
+                * Boundary is better until we cross under the Memory Line
+                * Size (the minimum unit of transfer to/from Memory).  If we
+                * have a Padding Boundary which is smaller than the Memory
+                * Line Size, that'll involve a Read-Modify-Write cycle on the
+                * Memory Controller which is never good.  For T5 the smallest
+                * Padding Boundary which we can select is 32 bytes which is
+                * larger than any known Memory Controller Line Size so we'll
+                * use that.
+                *
+                * T5 has a different interpretation of the "0" value for the
+                * Packing Boundary.  This corresponds to 16 bytes instead of
+                * the expected 32 bytes.  We never have a Packing Boundary
+                * less than 32 bytes so we can't use that special value but
+                * on the other hand, if we wanted 32 bytes, the best we can
+                * really do is 64 bytes.
+                */
+               if (fl_align <= 32) {
+                       fl_align = 64;
+                       fl_align_log = 6;
+               }
+               t4_set_reg_field(adap, SGE_CONTROL,
+                                INGPADBOUNDARY_MASK |
+                                EGRSTATUSPAGESIZE_MASK,
+                                INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL2_A,
+                                INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+                                INGPACKBOUNDARY_V(fl_align_log -
+                                                INGPACKBOUNDARY_SHIFT_X));
+       }
        /*
         * Adjust various SGE Free List Host Buffer Sizes.
         *
index a1024db5dc136bb2a64fe5d3d696580ae3e38434..8d2de1006b084574e78a9095a721cd359b115945 100644 (file)
@@ -95,6 +95,7 @@
 #define X_INGPADBOUNDARY_SHIFT 5
 
 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A         0x1124
 #define  DCASYSTYPE             0x00080000U
 #define  RXPKTCPLMODE_MASK      0x00040000U
 #define  RXPKTCPLMODE_SHIFT     18
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
 #define  PKTSHIFT_GET(x)       (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define  INGPCIEBOUNDARY_32B_X 0
 #define  INGPCIEBOUNDARY_MASK   0x00000380U
 #define  INGPCIEBOUNDARY_SHIFT  7
 #define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
                                 >> INGPADBOUNDARY_SHIFT)
+#define  INGPACKBOUNDARY_16B_X 0
+#define  INGPACKBOUNDARY_SHIFT_X 5
+
+#define  INGPACKBOUNDARY_S     16
+#define  INGPACKBOUNDARY_M     0x7U
+#define  INGPACKBOUNDARY_V(x)  ((x) << INGPACKBOUNDARY_S)
+#define  INGPACKBOUNDARY_G(x)  (((x) >> INGPACKBOUNDARY_S) \
+                                & INGPACKBOUNDARY_M)
 #define  EGRPCIEBOUNDARY_MASK   0x0000000eU
 #define  EGRPCIEBOUNDARY_SHIFT  1
 #define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
index 68eaa9c88c7d8a77646bd217e00877a281254a59..3d06e77d7121510e70c846c5d9a6c0e7526aa736 100644 (file)
@@ -299,6 +299,14 @@ struct sge {
        u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
        u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */
 
+       /* Decoded Adapter Parameters.
+        */
+       u32 fl_pg_order;                /* large page allocation size */
+       u32 stat_len;                   /* length of status page at ring end */
+       u32 pktshift;                   /* padding between CPL & packet data */
+       u32 fl_align;                   /* response queue message alignment */
+       u32 fl_starve_thres;            /* Free List starvation threshold */
+
        /*
         * Reverse maps from Absolute Queue IDs to associated queue pointers.
         * The absolute Queue IDs are in a compact range which start at a
index 85036e6b42c4cd4f7663ef2487948448d2d04be8..fdd078d7d82c661ec5c63e8198c9ae6d464366c3 100644 (file)
 #include "../cxgb4/t4fw_api.h"
 #include "../cxgb4/t4_msg.h"
 
-/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;                /* large page allocation size */
-static u32 STAT_LEN;           /* length of status page at ring end */
-static u32 PKTSHIFT;           /* padding between CPL and packet data */
-static u32 FL_ALIGN;           /* response queue message alignment */
-
 /*
  * Constants ...
  */
@@ -101,12 +93,6 @@ enum {
        TX_QCHECK_PERIOD = (HZ / 2),
        MAX_TIMER_TX_RECLAIM = 100,
 
-       /*
-        * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-        * timer will attempt to refill it.
-        */
-       FL_STARVE_THRES = 4,
-
        /*
         * Suspend an Ethernet TX queue with fewer available descriptors than
         * this.  We always want to have room for a maximum sized packet:
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  *     fl_starving - return whether a Free List is starving.
+ *     @adapter: pointer to the adapter
  *     @fl: the Free List
  *
  *     Tests specified Free List to see whether the number of buffers
 *     available to the hardware has fallen below our "starvation"
  *     threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+                              const struct sge_fl *fl)
 {
-       return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+       const struct sge *s = &adapter->sge;
+
+       return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  *     get_buf_size - return the size of an RX Free List buffer.
+ *     @adapter: pointer to the associated adapter
  *     @sdesc: pointer to the software buffer descriptor
  */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+                              const struct rx_sw_desc *sdesc)
 {
-       return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-               ? (PAGE_SIZE << FL_PG_ORDER)
-               : PAGE_SIZE;
+       const struct sge *s = &adapter->sge;
+
+       return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+               ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
                if (is_buf_mapped(sdesc))
                        dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                                      get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                                      get_buf_size(adapter, sdesc),
+                                      PCI_DMA_FROMDEVICE);
                put_page(sdesc->page);
                sdesc->page = NULL;
                if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
        if (is_buf_mapped(sdesc))
                dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                              get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                              get_buf_size(adapter, sdesc),
+                              PCI_DMA_FROMDEVICE);
        sdesc->page = NULL;
        if (++fl->cidx == fl->size)
                fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                              int n, gfp_t gfp)
 {
+       struct sge *s = &adapter->sge;
        struct page *page;
        dma_addr_t dma_addr;
        unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
         * If we don't support large pages, drop directly into the small page
         * allocation code.
         */
-       if (FL_PG_ORDER == 0)
+       if (s->fl_pg_order == 0)
                goto alloc_small_pages;
 
        while (n) {
                page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-                                  FL_PG_ORDER);
+                                  s->fl_pg_order);
                if (unlikely(!page)) {
                        /*
                         * We've failed in our attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                        fl->large_alloc_failed++;
                        break;
                }
-               poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+               poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
                dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-                                       PAGE_SIZE << FL_PG_ORDER,
+                                       PAGE_SIZE << s->fl_pg_order,
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                        /*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                         * because DMA mapping resources are typically
                         * critical resources once they become scarce.
                         */
-                       __free_pages(page, FL_PG_ORDER);
+                       __free_pages(page, s->fl_pg_order);
                        goto out;
                }
                dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
        fl->pend_cred += cred;
        ring_fl_db(adapter, fl);
 
-       if (unlikely(fl_starving(fl))) {
+       if (unlikely(fl_starving(adapter, fl))) {
                smp_wmb();
                set_bit(fl->cntxt_id, adapter->sge.starving_fl);
        }
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                   const struct cpl_rx_pkt *pkt)
 {
+       struct adapter *adapter = rxq->rspq.adapter;
+       struct sge *s = &adapter->sge;
        int ret;
        struct sk_buff *skb;
 
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                return;
        }
 
-       copy_frags(skb, gl, PKTSHIFT);
-       skb->len = gl->tot_len - PKTSHIFT;
+       copy_frags(skb, gl, s->pktshift);
+       skb->len = gl->tot_len - s->pktshift;
        skb->data_len = skb->len;
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
                       (rspq->netdev->features & NETIF_F_RXCSUM);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = rspq->adapter;
+       struct sge *s = &adapter->sge;
 
        /*
         * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
                rxq->stats.rx_drops++;
                return 0;
        }
-       __skb_pull(skb, PKTSHIFT);
+       __skb_pull(skb, s->pktshift);
        skb->protocol = eth_type_trans(skb, rspq->netdev);
        skb_record_rx_queue(skb, rspq->idx);
        rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = rspq->adapter;
+       struct sge *s = &adapter->sge;
        int budget_left = budget;
 
        while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                                BUG_ON(frag >= MAX_SKB_FRAGS);
                                BUG_ON(rxq->fl.avail == 0);
                                sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-                               bufsz = get_buf_size(sdesc);
+                               bufsz = get_buf_size(adapter, sdesc);
                                fp->page = sdesc->page;
                                fp->offset = rspq->offset;
                                fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                         */
                        ret = rspq->handler(rspq, rspq->cur_desc, &gl);
                        if (likely(ret == 0))
-                               rspq->offset += ALIGN(fp->size, FL_ALIGN);
+                               rspq->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&gl, &rxq->fl, frag);
                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
                         * schedule napi but the FL is no longer starving.
                         * No biggie.
                         */
-                       if (fl_starving(fl)) {
+                       if (fl_starving(adapter, fl)) {
                                struct sge_eth_rxq *rxq;
 
                                rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                       int intr_dest,
                       struct sge_fl *fl, rspq_handler_t hnd)
 {
+       struct sge *s = &adapter->sge;
        struct port_info *pi = netdev_priv(dev);
        struct fw_iq_cmd cmd, rpl;
        int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
                fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
                                      sizeof(__be64), sizeof(struct rx_sw_desc),
-                                     &fl->addr, &fl->sdesc, STAT_LEN);
+                                     &fl->addr, &fl->sdesc, s->stat_len);
                if (!fl->desc) {
                        ret = -ENOMEM;
                        goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                 * free list ring) in Egress Queue Units.
                 */
                flsz = (fl->size / FL_PER_EQ_UNIT +
-                       STAT_LEN / EQ_UNIT);
+                       s->stat_len / EQ_UNIT);
 
                /*
                 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
                           struct net_device *dev, struct netdev_queue *devq,
                           unsigned int iqid)
 {
+       struct sge *s = &adapter->sge;
        int ret, nentries;
        struct fw_eq_eth_cmd cmd, rpl;
        struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
         * Calculate the size of the hardware TX Queue (including the Status
         * Page on the end of the TX Queue) in units of TX Descriptors.
         */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        /*
         * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
                                 sizeof(struct tx_desc),
                                 sizeof(struct tx_sw_desc),
-                                &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+                                &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
        if (!txq->q.desc)
                return -ENOMEM;
 
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
  */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+       struct sge *s = &adapter->sge;
+
        dma_free_coherent(adapter->pdev_dev,
-                         tq->size * sizeof(*tq->desc) + STAT_LEN,
+                         tq->size * sizeof(*tq->desc) + s->stat_len,
                          tq->desc, tq->phys_addr);
        tq->cntxt_id = 0;
        tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
                         struct sge_fl *fl)
 {
+       struct sge *s = &adapter->sge;
        unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
        t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
        if (fl) {
                free_rx_bufs(adapter, fl, fl->avail);
                dma_free_coherent(adapter->pdev_dev,
-                                 fl->size * sizeof(*fl->desc) + STAT_LEN,
+                                 fl->size * sizeof(*fl->desc) + s->stat_len,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
        u32 fl0 = sge_params->sge_fl_buffer_size[0];
        u32 fl1 = sge_params->sge_fl_buffer_size[1];
        struct sge *s = &adapter->sge;
+       unsigned int ingpadboundary, ingpackboundary;
 
        /*
         * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
         * Now translate the adapter parameters into our internal forms.
         */
        if (fl1)
-               FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
-                   ? 128 : 64);
-       PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-       FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                        SGE_INGPADBOUNDARY_SHIFT);
+               s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+                       ? 128 : 64);
+       s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately.  The actual Ingress Packet Data alignment boundary
+        * within Packed Buffer Mode is the maximum of these two
+        * specifications.  (Note that it makes no real practical sense to
+        * have the Padding Boundary be larger than the Packing Boundary, but you
+        * could set the chip up that way and, in fact, legacy T4 code would
+        * end up doing this because it would initialize the Padding Boundary and
+        * leave the Packing Boundary initialized to 0 (16 bytes).)
+        */
+       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+                              X_INGPADBOUNDARY_SHIFT);
+       if (is_t4(adapter->params.chip)) {
+               s->fl_align = ingpadboundary;
+       } else {
+               /* T5 has a different interpretation of one of the PCIe Packing
+                * Boundary values.
+                */
+               ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+               if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+                       ingpackboundary = 16;
+               else
+                       ingpackboundary = 1 << (ingpackboundary +
+                                               INGPACKBOUNDARY_SHIFT_X);
+
+               s->fl_align = max(ingpadboundary, ingpackboundary);
+       }
+
+       /* A FL with <= fl_starve_thres buffers is starving and a periodic
+        * timer will attempt to refill it.  This needs to be larger than the
+        * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+        * stuck waiting for new packets while the SGE is waiting for us to
+        * give it more Free List entries.  (Note that the SGE's Egress
+        * Congestion Threshold is in units of 2 Free List pointers.)
+        */
+       s->fl_starve_thres
+               = EGRTHRESHOLD_GET(sge_params->sge_congestion_control) * 2 + 1;
 
        /*
         * Set up tasklet timers.
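Since EGRTHRESHOLD is expressed in units of two Free List pointers, doubling it and adding one guarantees the driver's refill timer fires before the SGE's own congestion threshold is crossed; with a hypothetical threshold reading of 64:

/* Hypothetical: EGRTHRESHOLD_GET(sge_congestion_control) == 64,
 * i.e. the SGE flags congestion below 64 * 2 = 128 FL pointers.
 */
s->fl_starve_thres = 64 * 2 + 1;	/* start refilling at <= 129 entries */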
index 95df61dcb4ce57021237a15013bfdc8e88c0fa5d..4b6a6d14d86d98df42bea1de5a6a507d9820374e 100644 (file)
@@ -134,11 +134,13 @@ struct dev_params {
  */
 struct sge_params {
        u32 sge_control;                /* padding, boundaries, lengths, etc. */
+       u32 sge_control2;               /* T5: more of the same */
        u32 sge_host_page_size;         /* RDMA page sizes */
        u32 sge_queues_per_page;        /* RDMA queues/page */
        u32 sge_user_mode_limits;       /* limits for BAR2 user mode accesses */
        u32 sge_fl_buffer_size[16];     /* free list buffer sizes */
        u32 sge_ingress_rx_threshold;   /* RX counter interrupt threshold[4] */
+       u32 sge_congestion_control;     /* congestion thresholds, etc. */
        u32 sge_timer_value_0_and_1;    /* interrupt coalescing timer values */
        u32 sge_timer_value_2_and_3;
        u32 sge_timer_value_4_and_5;
index e984fdc48ba2e326a738e6f5bfa14e1bfda73f02..1e896b923234f6d1b2e21da52fd73b010d45f214 100644 (file)
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
        sge_params->sge_timer_value_2_and_3 = vals[5];
        sge_params->sge_timer_value_4_and_5 = vals[6];
 
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately, with the Padding Boundary in SGE_CONTROL and the Packing
+        * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+        * SGE_CONTROL2 in order to determine how ingress packet data will be
+        * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+        * the firmware won't let us retrieve SGE_CONTROL2, so if we get a
+        * failure grabbing it, we throw an error since we can't figure out the
+        * right value.
+        */
+       if (!is_t4(adapter->params.chip)) {
+               params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                            FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+               v = t4vf_query_params(adapter, 1, params, vals);
+               if (v != FW_SUCCESS) {
+                       dev_err(adapter->pdev_dev,
+                               "Unable to get SGE Control2; "
+                               "probably old firmware.\n");
+                       return v;
+               }
+               sge_params->sge_control2 = vals[0];
+       }
+
        params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
-       v = t4vf_query_params(adapter, 1, params, vals);
+       params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+       v = t4vf_query_params(adapter, 2, params, vals);
        if (v)
                return v;
        sge_params->sge_ingress_rx_threshold = vals[0];
+       sge_params->sge_congestion_control = vals[1];
 
        return 0;
 }
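
The hunk above also batches two register reads into a single firmware call. A toy model of that pattern follows; query_params() here is a local stub standing in for t4vf_query_params(), which takes an array of parameter mnemonics and fills a matching array of values.

    #include <stdio.h>

    #define FW_SUCCESS 0
    #define FW_EINVAL  (-22)

    /* Stub standing in for the mailbox query (assumption for illustration). */
    static int query_params(unsigned int n, const unsigned int *params,
                            unsigned int *vals)
    {
            for (unsigned int i = 0; i < n; i++) {
                    if (params[i] == 0xbad)     /* unknown to old firmware */
                            return FW_EINVAL;
                    vals[i] = params[i] + 1;    /* fake register contents */
            }
            return FW_SUCCESS;
    }

    int main(void)
    {
            unsigned int params[2] = { 0x10, 0x20 };    /* two registers */
            unsigned int vals[2];

            /* One round trip fetches both values, halving mailbox traffic. */
            if (query_params(2, params, vals) != FW_SUCCESS)
                    return 1;
            printf("%u %u\n", vals[0], vals[1]);
            return 0;
    }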
index 180e53fa628face1c64b5306afdf5c95aaca89a6..73cf1653a4a3d4225951334e26506e89145cd1e4 100644 (file)
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        struct vnic_rq_buf *buf = rq->to_use;
 
        if (buf->os_buf) {
-               buf = buf->next;
-               rq->to_use = buf;
-               rq->ring.desc_avail--;
-               if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-                       /* Adding write memory barrier prevents compiler and/or
-                        * CPU reordering, thus avoiding descriptor posting
-                        * before descriptor is initialized. Otherwise, hardware
-                        * can read stale descriptor fields.
-                        */
-                       wmb();
-                       iowrite32(buf->index, &rq->ctrl->posted_index);
-               }
+               enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+                                  buf->len);
 
                return 0;
        }
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                                enic->rq_truncated_pkts++;
                }
 
+               pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+                                PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
 
                return;
        }
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                /* Buffer overflow
                 */
 
+               pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+                                PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
        }
 }
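
The block deleted above documented the classic descriptor-posting rule: fully initialize the descriptor, issue a write barrier, then publish the index the hardware polls. A userspace analogue follows, with a C11 release fence standing in for wmb() and a plain atomic store standing in for the iowrite32() of the posted index; real MMIO ordering has extra subtleties this sketch does not model.

    #include <stdatomic.h>
    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    static struct desc ring[256];
    static _Atomic uint32_t posted_index;

    /* Publish one descriptor: fill every field first, then make those
     * writes visible before the consumer can observe the new index. */
    static void post(uint32_t idx, uint64_t addr, uint32_t len)
    {
            ring[idx].addr = addr;
            ring[idx].len  = len;
            atomic_thread_fence(memory_order_release);      /* ~ wmb() */
            atomic_store_explicit(&posted_index, idx, memory_order_relaxed);
    }

    int main(void)
    {
            post(0, 0x1000, 64);
            return (int)atomic_load(&posted_index);
    }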
 
index 9a18e7930b31bea1cebc533d014655af5673c748..597c463e384d0d5d9fb0f46a9b363e9c9165c6cf 100644 (file)
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
                        return -EINVAL;
@@ -4421,6 +4426,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
                 "Disabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
 }
+
+static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+       return vxlan_gso_check(skb);
+}
 #endif
 
 static const struct net_device_ops be_netdev_ops = {
@@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = {
 #ifdef CONFIG_BE2NET_VXLAN
        .ndo_add_vxlan_port     = be_add_vxlan_port,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
+       .ndo_gso_check          = be_gso_check,
 #endif
 };
 
index 50a851db2852e28979a0b78f481347fc9e29778a..3dca494797bd2ebb8a7dd6c1983b7f19e72e481c 100644 (file)
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
        return bufaddr;
 }
 
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+       int i;
+       unsigned int *src = src_buf;
+       unsigned int *dst = dst_buf;
+
+       for (i = 0; i < len; i += 4, src++, dst++)
+               *dst = swab32p(src);
+}
+
 static void fec_dump(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 }
 
 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-                              struct bufdesc *bdp, u32 length)
+                              struct bufdesc *bdp, u32 length, bool swap)
 {
        struct  fec_enet_private *fep = netdev_priv(ndev);
        struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
        dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
-       memcpy(new_skb->data, (*skb)->data, length);
+       if (!swap)
+               memcpy(new_skb->data, (*skb)->data, length);
+       else
+               swap_buffer2(new_skb->data, (*skb)->data, length);
        *skb = new_skb;
 
        return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
        u16     vlan_tag;
        int     index = 0;
        bool    is_copybreak;
+       bool    need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
 
 #ifdef CONFIG_M532x
        flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
-               is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+               is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+                                                 need_swap);
                if (!is_copybreak) {
                        skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
                        if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;
-               if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+               if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);
 
                /* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
                netif_device_detach(ndev);
                netif_tx_unlock_bh(ndev);
                fec_stop(ndev);
+               fec_enet_clk_enable(ndev, false);
+               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        }
        rtnl_unlock();
 
-       fec_enet_clk_enable(ndev, false);
-       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
                        return ret;
        }
 
-       pinctrl_pm_select_default_state(&fep->pdev->dev);
-       ret = fec_enet_clk_enable(ndev, true);
-       if (ret)
-               goto failed_clk;
-
        rtnl_lock();
        if (netif_running(ndev)) {
+               pinctrl_pm_select_default_state(&fep->pdev->dev);
+               ret = fec_enet_clk_enable(ndev, true);
+               if (ret) {
+                       rtnl_unlock();
+                       goto failed_clk;
+               }
                fec_restart(ndev);
                netif_tx_lock_bh(ndev);
                netif_device_attach(ndev);
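
The new swap_buffer2() above copies and byte-swaps in a single pass rather than memcpy() followed by an in-place swap_buffer(). A standalone equivalent, with the GCC/Clang __builtin_bswap32 standing in for the kernel's swab32p():

    #include <stdio.h>

    /* Copy len bytes from src to dst, byte-swapping each 32-bit word. */
    static void swap_copy(void *dst_buf, const void *src_buf, int len)
    {
            const unsigned int *src = src_buf;
            unsigned int *dst = dst_buf;

            for (int i = 0; i < len; i += 4)
                    *dst++ = __builtin_bswap32(*src++);
    }

    int main(void)
    {
            unsigned int in[2] = { 0x01020304, 0x05060708 };
            unsigned int out[2];

            swap_copy(out, in, sizeof(in));
            printf("%08x %08x\n", out[0], out[1]);  /* 04030201 08070605 */
            return 0;
    }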
index a2d72a87cbde40465c16277e32d4a242a2873ee3..487cd9c4ac0d33a3ce07bb12fbe3f5db00e01586 100644 (file)
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
        /* igb_get_stats64() might access the rings on this vector,
         * we must wait a grace period before freeing it.
         */
-       kfree_rcu(q_vector, rcu);
+       if (q_vector)
+               kfree_rcu(q_vector, rcu);
 }
 
 /**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
        adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
 
        for (i = 0; i < adapter->num_q_vectors; i++) {
-               napi_synchronize(&(adapter->q_vector[i]->napi));
-               napi_disable(&(adapter->q_vector[i]->napi));
+               if (adapter->q_vector[i]) {
+                       napi_synchronize(&adapter->q_vector[i]->napi);
+                       napi_disable(&adapter->q_vector[i]->napi);
+               }
        }
 
 
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_free_tx_resources(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
 void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_clean_tx_ring(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 /**
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_free_rx_resources(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_clean_rx_ring(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev)
        pci_restore_state(pdev);
        pci_save_state(pdev);
 
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
index d2df4e3d1032496dbf294f4d7b0b741ddfaac6d8..cc51554c9e99a49e74c24ab7bf8f97848a06d7a3 100644 (file)
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                 * if SR-IOV and VMDQ are disabled - otherwise ensure
                 * that hardware VLAN filters remain enabled.
                 */
-               if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                       IXGBE_FLAG_SRIOV_ENABLED)))
+               if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+                                     IXGBE_FLAG_SRIOV_ENABLED))
                        vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode == BRIDGE_MODE_VEPA) {
                        reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i, err, pci_using_dac, expected_gts;
        unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
+       bool disable_dev = false;
 #ifdef IXGBE_FCOE
        u16 device_caps;
 #endif
@@ -8369,13 +8375,14 @@ err_sw_init:
        iounmap(adapter->io_addr);
        kfree(adapter->mac_table);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-       if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
 }
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
+       bool disable_dev;
 
        ixgbe_dbg_adapter_exit(adapter);
 
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
        e_dev_info("complete\n");
 
        kfree(adapter->mac_table);
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
 
-       if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (disable_dev)
                pci_disable_device(pdev);
 }
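
The disable_dev changes above fix a use-after-free: the __IXGBE_DISABLED bit lives in the adapter, whose memory is released together with the netdev by free_netdev(), so the test_and_set_bit() must run before the free and only its cached result may be used afterwards. A stripped-down model of the pattern (all names here are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    struct adapter { unsigned long state; };    /* freed with the netdev */

    #define DISABLED_BIT 0x1UL

    static bool test_and_set_disabled(struct adapter *a)
    {
            bool was_set = a->state & DISABLED_BIT;

            a->state |= DISABLED_BIT;
            return was_set;
    }

    static void teardown(struct adapter *a)
    {
            bool disable_dev;

            if (!a)
                    return;

            /* Capture the decision while the object is still alive... */
            disable_dev = !test_and_set_disabled(a);

            free(a);                /* adapter memory is gone from here on */

            /* ...so the freed object is never touched again afterwards. */
            if (disable_dev) {
                    /* pci_disable_device() would run here */
            }
    }

    int main(void)
    {
            teardown(calloc(1, sizeof(struct adapter)));
            return 0;
    }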
 
index d47b19f27c355c96ea006c004afa38efac3dcfe7..28b81ae09b5ae2288e8d05fac82176a8d504fd18 100644 (file)
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  **/
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 {
-       s32 status;
        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
        bool autoneg = false;
        ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        hw->phy.ops.write_reg(hw, MDIO_CTRL1,
                              MDIO_MMD_AN, autoneg_reg);
-
-       return status;
+       return 0;
 }
 
 /**
index b151a949f352a20ec8e74b4f3a7b6bb194ce841c..d44560d1d268caae42143b674535db45c2784289 100644 (file)
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
-               struct sk_buff *skb;
 
                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                reclaimed++;
                txq->tx_desc_count--;
 
-               skb = NULL;
-               if (cmd_sts & TX_LAST_DESC)
-                       skb = __skb_dequeue(&txq->tx_skb);
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+                                        desc->byte_cnt, DMA_TO_DEVICE);
+
+               if (cmd_sts & TX_ENABLE_INTERRUPT) {
+                       struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+                       if (!WARN_ON(!skb))
+                               dev_kfree_skb(skb);
+               }
 
                if (cmd_sts & ERROR_SUMMARY) {
                        netdev_info(mp->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }
 
-               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
-               dev_kfree_skb(skb);
        }
 
        __netif_tx_unlock_bh(nq);
index ece83f101526de02fac0af5ccf0f85c88d440670..fdf3e382e4649313b677fa8c164e1d2430130f3b 100644 (file)
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 {
        struct mvpp2_prs_entry *pe;
        int tid_aux, tid;
+       int ret = 0;
 
        pe = mvpp2_prs_vlan_find(priv, tpid, ai);
 
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
                                break;
                }
 
-               if (tid <= tid_aux)
-                       return -EINVAL;
+               if (tid <= tid_aux) {
+                       ret = -EINVAL;
+                       goto error;
+               }
 
                memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 
        mvpp2_prs_hw_write(priv, pe);
 
+error:
        kfree(pe);
 
-       return 0;
+       return ret;
 }
 
 /* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                                     unsigned int port_map)
 {
        struct mvpp2_prs_entry *pe;
-       int tid_aux, tid, ai;
+       int tid_aux, tid, ai, ret = 0;
 
        pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
 
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 
                /* Set ai value for new double vlan entry */
                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
-               if (ai < 0)
-                       return ai;
+               if (ai < 0) {
+                       ret = ai;
+                       goto error;
+               }
 
                /* Get first single/triple vlan tid */
                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                                break;
                }
 
-               if (tid >= tid_aux)
-                       return -ERANGE;
+               if (tid >= tid_aux) {
+                       ret = -ERANGE;
+                       goto error;
+               }
 
                memset(pe, 0, sizeof(struct mvpp2_prs_entry));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
        mvpp2_prs_tcam_port_map_set(pe, port_map);
        mvpp2_prs_hw_write(priv, pe);
 
+error:
        kfree(pe);
-       return 0;
+       return ret;
 }
 
 /* IPv4 header parsing for fragmentation and L4 offset */
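
Both mvpp2 hunks above convert early returns into goto error so that the entry allocated or looked up at the top of the function is always freed. The general shape of that single-exit cleanup, reduced to plain C:

    #include <stdlib.h>

    /* Single-exit cleanup: every failure after the allocation jumps to
     * the label instead of returning and leaking "pe". */
    static int add_entry(int bad_input)
    {
            int ret = 0;
            void *pe = malloc(64);

            if (!pe)
                    return -12;             /* -ENOMEM: nothing to free yet */

            if (bad_input) {
                    ret = -22;              /* -EINVAL */
                    goto error;
            }

            /* ... program the entry ... */

    error:
            free(pe);
            return ret;
    }

    int main(void)
    {
            return add_entry(0);
    }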
index f3032fec8fce03767580bd555a4760e8d7faba1e..4d69e382b4e5d07ab22fe327276c867099a12ca4 100644 (file)
@@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev)
        mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
 
 #ifdef CONFIG_MLX4_EN_VXLAN
-       if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+       if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                vxlan_get_rx_port(dev);
 #endif
        priv->port_up = true;
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 1);
 out:
-       if (ret)
+       if (ret) {
                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+               return;
+       }
+
+       /* set offloads */
+       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
        int ret;
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_del_task);
+       /* unset offloads */
+       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+       priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
 
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2342,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct  net_device *dev,
 
        queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
 }
+
+static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+       return vxlan_gso_check(skb);
+}
 #endif
 
 static const struct net_device_ops mlx4_netdev_ops = {
@@ -2373,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_MLX4_EN_VXLAN
        .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
        .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
+       .ndo_gso_check          = mlx4_en_gso_check,
 #endif
 };
 
@@ -2403,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
+#ifdef CONFIG_MLX4_EN_VXLAN
+       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
+       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
+       .ndo_gso_check          = mlx4_en_gso_check,
+#endif
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2568,13 +2592,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
                dev->priv_flags |= IFF_UNICAST_FLT;
 
-       if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-               dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                                       NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
-               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-               dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
-       }
-
        mdev->pndev[port] = dev;
 
        netif_carrier_off(dev);
index 5d2498dcf536d5e96cc13b9274b00137d4508c5d..cd5cf6d957c7afa98d76ad54ba42beb1b395e3c8 100644 (file)
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
        switch (op) {
        case RES_OP_RESERVE:
-               count = get_param_l(&in_param);
+               count = get_param_l(&in_param) & 0xffffff;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
index a278238a2db643ba04b924dd9920a284abcd8908..ad2c96a02a5352caec2d8649c714c3616be98ea8 100644 (file)
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
+       eq->irqn = vecidx;
+       eq->dev = dev;
+       eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
                          eq->name, eq);
        if (err)
                goto err_eq;
 
-       eq->irqn = vecidx;
-       eq->dev = dev;
-       eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;
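
The mlx5 reordering above closes a window in which the EQ interrupt could fire between request_irq() and the assignments to eq->irqn, eq->dev and eq->doorbell, letting the handler read uninitialized fields. The rule in miniature; register_handler() below is a stand-in that may invoke the callback immediately, much as a shared IRQ line can:

    #include <stdio.h>

    struct eq { int irqn; void *doorbell; };

    static void handler(struct eq *eq)
    {
            printf("irq on eq %d\n", eq->irqn);     /* must see valid data */
    }

    /* Stand-in: like a shared IRQ, the handler may run as soon as it is
     * registered, so everything it touches must already be set up. */
    static void register_handler(void (*fn)(struct eq *), struct eq *eq)
    {
            fn(eq);
    }

    int main(void)
    {
            static char doorbell_page[64];
            struct eq eq;

            eq.irqn = 3;                    /* initialize first...       */
            eq.doorbell = doorbell_page;
            register_handler(handler, &eq); /* ...then enable delivery   */
            return 0;
    }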
index 3d8e8e489b2ddb3539a4a9c89b784974c833e50e..71b10b210792e316582d0280d87f92cbee6bb2d8 100644 (file)
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
        dev->profile = &profile[prof_sel];
        dev->event = mlx5_core_event;
 
+       INIT_LIST_HEAD(&priv->ctx_list);
+       spin_lock_init(&priv->ctx_lock);
        err = mlx5_dev_init(dev, pdev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
                goto out;
        }
 
-       INIT_LIST_HEAD(&priv->ctx_list);
-       spin_lock_init(&priv->ctx_lock);
        err = mlx5_register_device(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
index 0b2a1ccd276dbd4c1e582ef635b6304c351d03d7..613037584d08e785ef2700ca1d2221b50b256e9c 100644 (file)
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
        if (test_bit(__NX_RESETTING, &adapter->state))
                goto reschedule;
 
-       if (test_bit(__NX_DEV_UP, &adapter->state)) {
+       if (test_bit(__NX_DEV_UP, &adapter->state) &&
+           !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
                if (!adapter->has_link_events) {
 
                        netxen_nic_handle_phy_intr(adapter);
index f5e29f7bdae39b77eed8abc0b8e73360ceeda698..a913b3ad2f899e791a9abd3c5e518d1f7410ceeb 100644 (file)
@@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
 
        adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
 }
+
+static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+       return vxlan_gso_check(skb);
+}
 #endif
 
 static const struct net_device_ops qlcnic_netdev_ops = {
@@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #ifdef CONFIG_QLCNIC_VXLAN
        .ndo_add_vxlan_port     = qlcnic_add_vxlan_port,
        .ndo_del_vxlan_port     = qlcnic_del_vxlan_port,
+       .ndo_gso_check          = qlcnic_gso_check,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
index f3a47147937d46b3a7aa5955b6e9729d17597a99..9a49f42ac2ba4a76e743f1c400f9f4bd69850a6a 100644 (file)
@@ -5,7 +5,6 @@
 config NET_VENDOR_QUALCOMM
        bool "Qualcomm devices"
        default y
-       depends on SPI_MASTER && OF_GPIO
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
 
 config QCA7000
        tristate "Qualcomm Atheros QCA7000 support"
-       depends on SPI_MASTER && OF_GPIO
+       depends on SPI_MASTER && OF
        ---help---
          This SPI protocol driver supports the Qualcomm Atheros QCA7000.
 
index 60e9c2cd051e98bd9a1466f32e7344c52f241572..b5db6b3f939fc00884f313829cb9c1ffff118981 100644 (file)
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
        return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-       int reserve;
+       uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
 
-       reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
        if (reserve)
-               skb_reserve(skb, reserve);
+               skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-       skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
 
 
 /* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
-               skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+               skb = netdev_alloc_skb(ndev, skbuff_size);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
-               dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-                              DMA_FROM_DEVICE);
                sh_eth_set_receive_align(skb);
 
                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
+               /* The size of the buffer is a multiple of 16 bytes. */
+               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+               dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+                              DMA_FROM_DEVICE);
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
-               /* The size of the buffer is 16 byte boundary. */
-               rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;
+       int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-                                               mdp->rx_buf_sz,
+                                               ALIGN(mdp->rx_buf_sz, 16),
                                                DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
                if (mdp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+                       skb = netdev_alloc_skb(ndev, skbuff_size);
                        mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
-                       dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-                                      DMA_FROM_DEVICE);
                        sh_eth_set_receive_align(skb);
+                       dma_map_single(&ndev->dev, skb->data,
+                                      rxdesc->buffer_length, DMA_FROM_DEVICE);
 
                        skb_checksum_none_assert(skb);
                        rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
        if (ret)
                goto out_free_irq;
 
+       mdp->is_opened = 1;
+
        return ret;
 
 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       if (sh_eth_is_rz_fast_ether(mdp))
+               return &ndev->stats;
+
+       if (!mdp->is_opened)
+               return &ndev->stats;
+
+       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
+       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
+       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
+
+       if (sh_eth_is_gether(mdp)) {
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
+       } else {
+               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
+       }
+
+       return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
        sh_eth_write(ndev, 0, EDTRR);
        sh_eth_write(ndev, 0, EDRRR);
 
+       sh_eth_get_stats(ndev);
        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
 
        pm_runtime_put_sync(&mdp->pdev->dev);
 
-       return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (sh_eth_is_rz_fast_ether(mdp))
-               return &ndev->stats;
+       mdp->is_opened = 0;
 
-       pm_runtime_get_sync(&mdp->pdev->dev);
-
-       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
-       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
-       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
-       if (sh_eth_is_gether(mdp)) {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
-       } else {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
-       }
-       pm_runtime_put_sync(&mdp->pdev->dev);
-
-       return &ndev->stats;
+       return 0;
 }
 
 /* ioctl to device function */
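
The reworked sh_eth_set_receive_align() computes how far skb->data sits past the previous SH_ETH_RX_ALIGN boundary and reserves the remainder, which is also why skbuff_size now adds SH_ETH_RX_ALIGN - 1 spare bytes to every allocation. The arithmetic on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_ALIGN 32u    /* SH_ETH_RX_ALIGN on SH4/SHMOBILE */

    /* Bytes to reserve so that (data + reserve) is RX_ALIGN-aligned. */
    static unsigned int align_reserve(uintptr_t data)
    {
            uintptr_t mis = data & (RX_ALIGN - 1);

            return mis ? RX_ALIGN - mis : 0;
    }

    int main(void)
    {
            /* A buffer may start anywhere, so up to RX_ALIGN - 1 extra
             * bytes must be allocated to leave room for the shift. */
            printf("%u\n", align_reserve(0x1000));  /* aligned: reserve 0  */
            printf("%u\n", align_reserve(0x1004));  /* 4 past:  reserve 28 */
            printf("%u\n", align_reserve(0x1001));  /* worst:   reserve 31 */
            return 0;
    }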
index b37c427144ee0a1010794ff2beff42bc298fd1e9..22301bf9c21daeb925d75aa7ce5c7a588d977e11 100644 (file)
@@ -162,9 +162,9 @@ enum {
 
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN       32
+#define SH_ETH_RX_ALIGN                32
 #else
-#define SH2_SH3_SKB_RX_ALIGN   2
+#define SH_ETH_RX_ALIGN                2
 #endif
 
 /* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
 
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
+       unsigned is_opened:1;
 };
 
 static inline void sh_eth_soft_swap(char *src, int len)
index 002d4cdc319fda80a064c7cea199e1bf417d479c..a77f05ce832596d0c8eb10ef3a9735f48ba64b55 100644 (file)
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
                      EFX_MAX_CHANNELS,
                      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
                      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
-       BUG_ON(efx->max_channels == 0);
+       if (WARN_ON(efx->max_channels == 0))
+               return -EIO;
 
        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
index 2c62208077fe7a442aa1b5340e0e614d3b247c21..6cc3cf6f17c846cdcf900d47f17b40749dfb5966 100644 (file)
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
        const struct of_device_id *match = NULL;
        struct smc_local *lp;
        struct net_device *ndev;
-       struct resource *res, *ires;
+       struct resource *res;
        unsigned int __iomem *addr;
        unsigned long irq_flags = SMC_IRQ_FLAGS;
+       unsigned long irq_resflags;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
                goto out_free_netdev;
        }
 
-       ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!ires) {
+       ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq <= 0) {
                ret = -ENODEV;
                goto out_release_io;
        }
-
-       ndev->irq = ires->start;
-
-       if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
-               irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+       /*
+        * If this platform does not specify any special irqflags, or if
+        * the resource supplies a trigger, override the irqflags with
+        * the trigger flags from the resource.
+        */
+       irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+       if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+               irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
 
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
index affb29da353e29139fb758c5a5ce68b701479815..77ed74561e5fe815de9c7efdd1477404d3591b81 100644 (file)
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
        spin_unlock(&pdata->mac_lock);
 }
 
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
+{
+       int rc = 0;
+
+       if (!pdata->phy_dev)
+               return rc;
+
+       /* If the internal PHY is in General Power-Down mode, everything
+        * except the management interface is powered down and stays that way
+        * as long as PHY register bit 0.11 is HIGH.
+        *
+        * In that case, clear bit 0.11 so the PHY powers up and we can access
+        * the PHY registers.
+        */
+       rc = phy_read(pdata->phy_dev, MII_BMCR);
+       if (rc < 0) {
+               SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+               return rc;
+       }
+
+       /* If the PHY general power-down bit is not set, it is not necessary
+        * to disable General Power-Down mode.
+        */
+       if (rc & BMCR_PDOWN) {
+               rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+               if (rc < 0) {
+                       SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+                       return rc;
+               }
+
+               usleep_range(1000, 1500);
+       }
+
+       return 0;
+}
+
 static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
 {
        int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
                return rc;
        }
 
-       /*
-        * If energy is detected the PHY is already awake so is not necessary
-        * to disable the energy detect power-down mode.
-        */
-       if ((rc & MII_LAN83C185_EDPWRDOWN) &&
-           !(rc & MII_LAN83C185_ENERGYON)) {
+       /* Only disable if energy detect mode is already enabled */
+       if (rc & MII_LAN83C185_EDPWRDOWN) {
                /* Disable energy detect mode for this SMSC Transceivers */
                rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
                               rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
                        SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
                        return rc;
                }
-
-               mdelay(1);
+               /* Allow PHY to wake up */
+               mdelay(2);
        }
 
        return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
 
        /* Only enable if energy detect mode is already disabled */
        if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
-               mdelay(100);
                /* Enable energy detect mode for this SMSC Transceivers */
                rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
                               rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
                        SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
                        return rc;
                }
-
-               mdelay(1);
        }
        return 0;
 }
@@ -1414,6 +1443,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
        unsigned int temp;
        int ret;
 
+       /*
+        * Make sure to power-up the PHY chip before doing a reset, otherwise
+        * the reset fails.
+        */
+       ret = smsc911x_phy_general_power_up(pdata);
+       if (ret) {
+               SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
+               return ret;
+       }
+
        /*
         * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
         * are initialized in a Energy Detect Power-Down mode that prevents
index 6f77a46c7e2c446da3b3b4ae139057a621ef00ef..18c46bb0f3bfa296096ab2ec6624a4bd27bd304f 100644 (file)
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
        char *phy_bus_name = priv->plat->phy_bus_name;
+       unsigned long flags;
        bool ret = false;
 
        /* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                         * changed).
                         * In that case the driver disable own timers.
                         */
+                       spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                pr_debug("stmmac: disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
+                       spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
+               spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
-               pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
                ret = true;
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
        }
 out:
        return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 
+       spin_unlock_irqrestore(&priv->lock, flags);
+
        /* At this stage, it could be needed to setup the EEE or adjust some
         * MAC related HW registers.
         */
        priv->eee_enabled = stmmac_eee_init(priv);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /**
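
Since stmmac_eee_init() now takes priv->lock itself, stmmac_adjust_link() has to drop the lock before calling it, or the CPU would try to acquire the same non-recursive spinlock twice. The shape of the fix, with a mutex standing in for the spinlock:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void eee_init(void)
    {
            pthread_mutex_lock(&lock);      /* callee now locks on its own */
            /* ... toggle EEE state, arm or delete timers ... */
            pthread_mutex_unlock(&lock);
    }

    static void adjust_link(void)
    {
            pthread_mutex_lock(&lock);
            /* ... update speed/duplex under the lock ... */
            pthread_mutex_unlock(&lock);

            /* Call only after releasing: with the old ordering this call
             * re-acquired the same lock and deadlocked. */
            eee_init();
    }

    int main(void)
    {
            adjust_link();
            return 0;
    }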
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 }
 
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-                                 int i)
+                                 int i, gfp_t flags)
 {
        struct sk_buff *skb;
 
        skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-                                GFP_KERNEL);
+                                flags);
        if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_rx + i;
 
-               ret = stmmac_init_rx_buffers(priv, p, i);
+               ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;
 
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
 
-       ret = init_dma_desc_rings(dev);
-       if (ret < 0) {
-               pr_err("%s: DMA descriptors initialization failed\n", __func__);
-               return ret;
-       }
        /* DMA initialization and SW reset */
        ret = stmmac_init_dma_engine(priv);
        if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
        }
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-       priv->eee_enabled = stmmac_eee_init(priv);
-
-       stmmac_init_tx_coalesce(priv);
-
        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
                priv->rx_riwt = MAX_DMA_RIWT;
                priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
                goto dma_desc_error;
        }
 
+       ret = init_dma_desc_rings(dev, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: DMA descriptors initialization failed\n", __func__);
+               goto init_error;
+       }
+
        ret = stmmac_hw_setup(dev);
        if (ret < 0) {
                pr_err("%s: Hw setup failed\n", __func__);
                goto init_error;
        }
 
+       stmmac_init_tx_coalesce(priv);
+
        if (priv->phydev)
                phy_start(priv->phydev);
 
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        unsigned int enh_desc = priv->plat->enh_desc;
 
+       spin_lock(&priv->tx_lock);
+
        if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+               spin_unlock(&priv->tx_lock);
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        /* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock(&priv->tx_lock);
-
        if (priv->tx_path_in_lpi_mode)
                stmmac_disable_eee_mode(priv);
 
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 dma_map_err:
+       spin_unlock(&priv->tx_lock);
        dev_err(priv->device, "Tx dma map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       spin_lock(&priv->lock);
        priv->hw->mac->set_filter(priv->hw, dev);
-       spin_unlock(&priv->lock);
 }
 
 /**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
                stmmac_set_mac(priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable_unprepare(priv->stmmac_clk);
+               clk_disable(priv->stmmac_clk);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk previously disabled */
-               clk_prepare_enable(priv->stmmac_clk);
+               clk_enable(priv->stmmac_clk);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
 
        netif_device_attach(ndev);
 
+       init_dma_desc_rings(ndev, GFP_ATOMIC);
        stmmac_hw_setup(ndev);
+       stmmac_init_tx_coalesce(priv);
 
        napi_enable(&priv->napi);
 
index db56fa7ce8f91ae816b4733c501f60504c2ae228..58a1a0a423d494e2ce4fcc0861a02ff97e9077dc 100644 (file)
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
-       /* Set default value for multicast hash bins */
-       plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-       /* Set default value for unicast filter entries */
-       plat->unicast_filter_entries = 1;
-
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -270,16 +264,23 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
                return PTR_ERR(addr);
 
        plat_dat = dev_get_platdata(&pdev->dev);
-       if (pdev->dev.of_node) {
-               if (!plat_dat)
-                       plat_dat = devm_kzalloc(&pdev->dev,
+
+       if (!plat_dat)
+               plat_dat = devm_kzalloc(&pdev->dev,
                                        sizeof(struct plat_stmmacenet_data),
                                        GFP_KERNEL);
-               if (!plat_dat) {
-                       pr_err("%s: ERROR: no memory", __func__);
-                       return  -ENOMEM;
-               }
+       if (!plat_dat) {
+               pr_err("%s: ERROR: no memory", __func__);
+               return -ENOMEM;
+       }
+
+       /* Set default value for multicast hash bins */
+       plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
 
+       /* Set default value for unicast filter entries */
+       plat_dat->unicast_filter_entries = 1;
+
+       if (pdev->dev.of_node) {
                ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
                if (ret) {
                        pr_err("%s: main dt probe failed", __func__);
index 72c8525d5457b40af75c5c15d6b4bd1432185543..9c014803b03b2338ad2f97fd31794966cfc02b66 100644 (file)
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
        HMD(("init rxring, "));
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb;
+               u32 mapping;
 
                skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
                if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 
                /* Because we reserve afterwards. */
                skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+               mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+                                        DMA_FROM_DEVICE);
+               if (dma_mapping_error(hp->dma_dev, mapping)) {
+                       dev_kfree_skb_any(skb);
+                       hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+                       continue;
+               }
                hme_write_rxd(hp, &hb->happy_meal_rxd[i],
                              (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-                             dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-                                            DMA_FROM_DEVICE));
+                             mapping);
                skb_reserve(skb, RX_OFFSET);
        }
 
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                skb = hp->rx_skbs[elem];
                if (len > RX_COPY_THRESHOLD) {
                        struct sk_buff *new_skb;
+                       u32 mapping;
 
                        /* Now refill the entry, if we can. */
                        new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                                drops++;
                                goto drop_it;
                        }
+                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+                       mapping = dma_map_single(hp->dma_dev, new_skb->data,
+                                                RX_BUF_ALLOC_SIZE,
+                                                DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                               dev_kfree_skb_any(new_skb);
+                               drops++;
+                               goto drop_it;
+                       }
+
                        dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
                        hp->rx_skbs[elem] = new_skb;
-                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
                        hme_write_rxd(hp, this,
                                      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                     dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-                                                    DMA_FROM_DEVICE));
+                                     mapping);
                        skb_reserve(new_skb, RX_OFFSET);
 
                        /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+                                u32 first_len, u32 first_entry, u32 entry)
+{
+       struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+       dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+       first_entry = NEXT_TX(first_entry);
+       while (first_entry != entry) {
+               struct happy_meal_txd *this = &txbase[first_entry];
+               u32 addr, len;
+
+               addr = hme_read_desc32(hp, &this->tx_addr);
+               len = hme_read_desc32(hp, &this->tx_flags);
+               len &= TXFLAG_SIZE;
+               dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+       }
+}
+
 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
 {
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
                len = skb->len;
                mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+                       goto out_dma_error;
                tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
                hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
                              (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
                                               DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+                       goto out_dma_error;
                entry = NEXT_TX(entry);
 
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                        len = skb_frag_size(this_frag);
                        mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
                                                   0, len, DMA_TO_DEVICE);
+                       if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                               unmap_partial_tx_skb(hp, first_mapping, first_len,
+                                                    first_entry, entry);
+                               goto out_dma_error;
+                       }
                        this_txflags = tx_flags;
                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
        tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
        return NETDEV_TX_OK;
+
+out_dma_error:
+       hp->tx_skbs[hp->tx_new] = NULL;
+       spin_unlock_irq(&hp->happy_lock);
+
+       dev_kfree_skb_any(skb);
+       dev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
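
The sunhme change above is the standard DMA error-handling pattern for a transmit path: check every dma_map_single()/skb_frag_dma_map() result with dma_mapping_error(), unwind whatever was already mapped (the job of unmap_partial_tx_skb()), then drop the packet and still return NETDEV_TX_OK so the stack does not requeue it. A minimal sketch of that shape, with foo_* placeholders standing in for driver specifics (not part of sunhme):

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);
        dma_addr_t mapping;

        mapping = dma_map_single(fp->dma_dev, skb->data, skb_headlen(skb),
                                 DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(fp->dma_dev, mapping)))
                goto out_drop;                  /* nothing mapped yet */

        /* ... map each fragment; if one fails, unmap everything mapped
         * so far before jumping to out_drop ... */

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(skb);                 /* safe in any context */
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;                    /* drop; NETDEV_TX_BUSY would requeue */
}
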
index d8794488f80a78cbc4171c7ac57d942eb36ab521..c560f9aeb55d691f23c65dae362c18defa1e9e44 100644 (file)
@@ -129,9 +129,9 @@ do {                                                                \
 #define CPSW_VLAN_AWARE                BIT(1)
 #define CPSW_ALE_VLAN_AWARE    1
 
-#define CPSW_FIFO_NORMAL_MODE          (0 << 15)
-#define CPSW_FIFO_DUAL_MAC_MODE                (1 << 15)
-#define CPSW_FIFO_RATE_LIMIT_MODE      (2 << 15)
+#define CPSW_FIFO_NORMAL_MODE          (0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE                (1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE      (2 << 16)
 
 #define CPSW_INTPACEEN         (0x3f << 16)
 #define CPSW_INTPRESCALE_MASK  (0x7FF << 0)
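
For context on the one-bit change above: the CPSW FIFO mode selector sits in the port's TX FIFO control register starting at bit 16, not bit 15, so the old macros programmed the wrong bits. Assuming a two-bit mode field at bits 17:16 (which the corrected shifts imply), a register update composes like this; the register pointer name is illustrative, not taken from the driver:

        u32 val = readl(tx_in_ctl);             /* port TX FIFO control */

        val &= ~(3 << 16);                      /* clear the mode field */
        val |= CPSW_FIFO_DUAL_MAC_MODE;         /* (1 << 16) after this fix */
        writel(val, tx_in_ctl);
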
index 3ae83879a75f5eeb0c44cafcf362a661f1cb86ac..097ebe7077ac0c8de51e3eb7e8da5809f5e6bcea 100644 (file)
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
 {
        if (!ale)
                return -EINVAL;
-       cpsw_ale_stop(ale);
        cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
        kfree(ale);
        return 0;
index ab92f67da035f2f5f9aaa8ea4effa87ca83a2a41..4a4388b813ac6b2392917da20f873ceb34297e39 100644 (file)
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 
        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
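
The cpts fix above, and the two matching dp83640 hunks further down, repair PTP parsing of VLAN-tagged packets. IPV4_HLEN() dereferences the IP header's IHL field at a fixed Ethernet-header distance from its argument, so when offset already accounts for a VLAN tag the macro must be applied at data + offset; IPV4_HLEN(data) reads four bytes short of the real header. Roughly, per linux/ptp_classify.h of this era:

#define OFF_IHL         14      /* IHL sits right after the 14-byte Ethernet header */
#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)

        /* offset == VLAN_HLEN for tagged frames, 0 otherwise, hence: */
        offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
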
index 9ce854f43917ad723da924e7a99e3629c6a6e9bd..6cbc56ad9ff49256fa218e87c031c5c9f371d17a 100644 (file)
@@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev)
 
        err = wpan_phy_register(phy);
        if (err)
-               goto out;
+               goto err_phy_reg;
 
        err = register_netdev(dev);
-       if (err < 0)
-               goto out;
+       if (err)
+               goto err_netdev_reg;
 
        dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
        return 0;
 
-out:
-       unregister_netdev(dev);
+err_netdev_reg:
+       wpan_phy_unregister(phy);
+err_phy_reg:
+       free_netdev(dev);
+       wpan_phy_free(phy);
        return err;
 }
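
The rewritten error path above replaces a single out: label, which unregistered a netdev that may never have been registered and leaked both the phy and the netdev, with the usual reverse-order unwind ladder: each label undoes exactly the steps that had succeeded. The generic shape, with hypothetical foo_* steps:

static int foo_probe(struct platform_device *pdev)
{
        int err;

        err = foo_setup_a();                    /* step 1 */
        if (err)
                return err;

        err = foo_register_b();                 /* step 2 */
        if (err)
                goto err_teardown_a;

        err = foo_register_c();                 /* step 3 */
        if (err)
                goto err_unregister_b;

        return 0;

err_unregister_b:
        foo_unregister_b();                     /* undo step 2 */
err_teardown_a:
        foo_teardown_a();                       /* undo step 1 */
        return err;
}
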
 
index 6f226de655a40759874356c38352b928d1e425f3..880cc090dc44ecb0167682e951bedff7753aef94 100644 (file)
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+               if (vlan_tx_tag_present(skb))
+                       vnet_hdr->csum_start += VLAN_HLEN;
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
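
The macvtap hunk above fixes checksum offload for VLAN-accelerated frames: the tag is held out-of-line in skb metadata, but it will be materialized in the byte stream handed to the reader, so csum_start, an offset from the start of that stream, must grow by VLAN_HLEN (4 bytes). The adjustment as a standalone helper (hypothetical name, same logic):

/* Checksum start offset as the reader will see it once the out-of-line
 * VLAN tag has been inserted into the copied frame. */
static inline int wire_csum_start(const struct sk_buff *skb)
{
        int start = skb_checksum_start_offset(skb);

        if (vlan_tx_tag_present(skb))           /* tag not yet in skb->data */
                start += VLAN_HLEN;
        return start;
}
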
index 2954052706e8bcbf0979e89772326656ba6c57fa..e22e602beef3426a600db641a66237f40c039732 100644 (file)
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 
        switch (type & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
 
        switch (type & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
index 1dfffdc9dfc3577bd6893688da191c38539e243f..767cd110f49688d2b4118ba4dffc1a373972e56d 100644 (file)
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *mii_data = if_mii(ifr);
        u16 val = mii_data->val_in;
+       bool change_autoneg = false;
 
        switch (cmd) {
        case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                if (mii_data->phy_id == phydev->addr) {
                        switch (mii_data->reg_num) {
                        case MII_BMCR:
-                               if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
+                               if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
+                                       if (phydev->autoneg == AUTONEG_ENABLE)
+                                               change_autoneg = true;
                                        phydev->autoneg = AUTONEG_DISABLE;
-                               else
+                                       if (val & BMCR_FULLDPLX)
+                                               phydev->duplex = DUPLEX_FULL;
+                                       else
+                                               phydev->duplex = DUPLEX_HALF;
+                                       if (val & BMCR_SPEED1000)
+                                               phydev->speed = SPEED_1000;
+                                       else if (val & BMCR_SPEED100)
+                                               phydev->speed = SPEED_100;
+                                       else phydev->speed = SPEED_10;
+                               }
+                               else {
+                                       if (phydev->autoneg == AUTONEG_DISABLE)
+                                               change_autoneg = true;
                                        phydev->autoneg = AUTONEG_ENABLE;
-                               if (!phydev->autoneg && (val & BMCR_FULLDPLX))
-                                       phydev->duplex = DUPLEX_FULL;
-                               else
-                                       phydev->duplex = DUPLEX_HALF;
-                               if (!phydev->autoneg && (val & BMCR_SPEED1000))
-                                       phydev->speed = SPEED_1000;
-                               else if (!phydev->autoneg &&
-                                        (val & BMCR_SPEED100))
-                                       phydev->speed = SPEED_100;
+                               }
                                break;
                        case MII_ADVERTISE:
-                               phydev->advertising = val;
+                               phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+                               change_autoneg = true;
                                break;
                        default:
                                /* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                if (mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET)
                        return phy_init_hw(phydev);
+
+               if (change_autoneg)
+                       return phy_start_aneg(phydev);
+
                return 0;
 
        case SIOCSHWTSTAMP:
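
Two things change in phy_mii_ioctl() above: a forced BMCR write now derives speed and duplex from the written value, and the function records whether the ioctl actually flipped the autonegotiation state so it can kick phy_start_aneg() exactly once at the end. The MII_ADVERTISE case also gains a conversion, because the raw ADVERTISE_* register bits and the ethtool ADVERTISED_* mask kept in phydev->advertising use different bit layouts; for example:

#include <linux/mii.h>

        /* Raw MII register bits in, ethtool link-mode bits out. */
        u32 adv = mii_adv_to_ethtool_adv_t(ADVERTISE_100FULL |
                                           ADVERTISE_10FULL);
        /* adv == ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full */
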
index 68c3a3f4e0abe54910923aca8aa64286366e131a..794a4732936883c662212e0769e62993e67d96b0 100644 (file)
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
+                       struct bpf_prog *pass_filter = NULL;
                        struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
 
-                       ppp_lock(ppp);
-                       if (ppp->pass_filter) {
-                               bpf_prog_destroy(ppp->pass_filter);
-                               ppp->pass_filter = NULL;
+                       err = 0;
+                       if (fprog.filter)
+                               err = bpf_prog_create(&pass_filter, &fprog);
+                       if (!err) {
+                               ppp_lock(ppp);
+                               if (ppp->pass_filter)
+                                       bpf_prog_destroy(ppp->pass_filter);
+                               ppp->pass_filter = pass_filter;
+                               ppp_unlock(ppp);
                        }
-                       if (fprog.filter != NULL)
-                               err = bpf_prog_create(&ppp->pass_filter,
-                                                     &fprog);
-                       else
-                               err = 0;
                        kfree(code);
-                       ppp_unlock(ppp);
                }
                break;
        }
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
+                       struct bpf_prog *active_filter = NULL;
                        struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
 
-                       ppp_lock(ppp);
-                       if (ppp->active_filter) {
-                               bpf_prog_destroy(ppp->active_filter);
-                               ppp->active_filter = NULL;
+                       err = 0;
+                       if (fprog.filter)
+                               err = bpf_prog_create(&active_filter, &fprog);
+                       if (!err) {
+                               ppp_lock(ppp);
+                               if (ppp->active_filter)
+                                       bpf_prog_destroy(ppp->active_filter);
+                               ppp->active_filter = active_filter;
+                               ppp_unlock(ppp);
                        }
-                       if (fprog.filter != NULL)
-                               err = bpf_prog_create(&ppp->active_filter,
-                                                     &fprog);
-                       else
-                               err = 0;
                        kfree(code);
-                       ppp_unlock(ppp);
                }
                break;
        }
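
Both ppp_ioctl() hunks above apply the same fix: bpf_prog_create() performs GFP_KERNEL allocations and may sleep, so it cannot run under ppp_lock (a spinlock). The pattern is build first, publish under the lock:

        struct bpf_prog *new_prog = NULL;
        int err = 0;

        if (fprog.filter)
                err = bpf_prog_create(&new_prog, &fprog);  /* may sleep */
        if (!err) {
                ppp_lock(ppp);                  /* atomic from here on */
                if (ppp->pass_filter)
                        bpf_prog_destroy(ppp->pass_filter);
                ppp->pass_filter = new_prog;    /* publish under the lock */
                ppp_unlock(ppp);
        }
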
index 1aff970be33ec88ec8ff6781467155f68cf5b3b4..1dc628ffce2b52a565354f060dca84467d67e4b9 100644 (file)
@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
        int len = sizeof(struct sockaddr_pppox);
        struct sockaddr_pppox sp;
 
-       sp.sa_family      = AF_PPPOX;
+       memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
+
+       sp.sa_family    = AF_PPPOX;
        sp.sa_protocol  = PX_PROTO_PPTP;
        sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
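
The pptp_getname() hunk is an information-leak fix: struct sockaddr_pppox is copied out to the caller, and uninitialized padding inside sa_addr would otherwise carry stale kernel stack bytes. The patch zeroes sa_addr before filling it; the belt-and-braces variant clears the whole structure:

        struct sockaddr_pppox sp;

        memset(&sp, 0, sizeof(sp));     /* no uninitialized bytes escape */
        sp.sa_family    = AF_PPPOX;
        sp.sa_protocol  = PX_PROTO_PPTP;
        sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
        memcpy(uaddr, &sp, len);        /* then hand it to the caller */
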
 
index 7302398f0b1fff89ffe4a4e373e936c928180645..9dd3746994a42cb73c585848876ff3e878963f24 100644 (file)
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        struct tun_pi pi = { 0, skb->protocol };
        ssize_t total = 0;
        int vlan_offset = 0, copied;
+       int vlan_hlen = 0;
+       int vnet_hdr_sz = 0;
+
+       if (vlan_tx_tag_present(skb))
+               vlan_hlen = VLAN_HLEN;
+
+       if (tun->flags & TUN_VNET_HDR)
+               vnet_hdr_sz = tun->vnet_hdr_sz;
 
        if (!(tun->flags & TUN_NO_PI)) {
                if ((len -= sizeof(pi)) < 0)
                        return -EINVAL;
 
-               if (len < skb->len) {
+               if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
                        /* Packet will be stripped */
                        pi.flags |= TUN_PKT_STRIP;
                }
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                total += sizeof(pi);
        }
 
-       if (tun->flags & TUN_VNET_HDR) {
+       if (vnet_hdr_sz) {
                struct virtio_net_hdr gso = { 0 }; /* no info leak */
-               if ((len -= tun->vnet_hdr_sz) < 0)
+               if ((len -= vnet_hdr_sz) < 0)
                        return -EINVAL;
 
                if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       gso.csum_start = skb_checksum_start_offset(skb);
+                       gso.csum_start = skb_checksum_start_offset(skb) +
+                                        vlan_hlen;
                        gso.csum_offset = skb->csum_offset;
                } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
                                               sizeof(gso))))
                        return -EFAULT;
-               total += tun->vnet_hdr_sz;
+               total += vnet_hdr_sz;
        }
 
        copied = total;
-       total += skb->len;
-       if (!vlan_tx_tag_present(skb)) {
-               len = min_t(int, skb->len, len);
-       } else {
+       len = min_t(int, skb->len + vlan_hlen, len);
+       total += skb->len + vlan_hlen;
+       if (vlan_hlen) {
                int copy, ret;
                struct {
                        __be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-               len = min_t(int, skb->len + VLAN_HLEN, len);
-               total += VLAN_HLEN;
 
                copy = min_t(int, vlan_offset, len);
                ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
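
The tun_put_user() rework above unifies the length bookkeeping: what userspace reads is (optional struct tun_pi) + (optional virtio-net header) + frame bytes, and the frame grows by VLAN_HLEN when an out-of-line tag is materialized. Computing vlan_hlen and vnet_hdr_sz once up front keeps the truncation test and the copy clamp in agreement; previously the TUN_PKT_STRIP check ignored both extras. The test reduces to:

        int vlan_hlen   = vlan_tx_tag_present(skb) ? VLAN_HLEN : 0;
        int vnet_hdr_sz = (tun->flags & TUN_VNET_HDR) ? tun->vnet_hdr_sz : 0;

        len -= sizeof(pi);              /* struct tun_pi is delivered first */
        if (len < skb->len + vlan_hlen + vnet_hdr_sz)
                pi.flags |= TUN_PKT_STRIP;      /* tell userspace: truncated */
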
index 2c05f6cdb12f3a1e2ae9bed29c6241cee7dc3a46..816d511e34d33065a80cd171bab45ec25b8af3db 100644 (file)
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                return ret;
        }
 
-       ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
-       if (ret < 0)
-               return ret;
-
-       msleep(150);
-
-       ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
-       if (ret < 0)
-               return ret;
-
-       msleep(150);
-
-       ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+       ax88772_reset(dev);
 
        /* Read PHYID register *AFTER* the PHY was reset properly */
        phyid = asix_get_phyid(dev);
index 22756db53dcacc3138fd000cad8dbda968948fb8..b8a82b86f909095632c7d5747b9bf25cb81c970e 100644 (file)
@@ -780,6 +780,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index ec2a8b41ed41a1484c697f1897e0286f5d5d845a..b0bc8ead47de240c8f27605488eb300d0445c4fb 100644 (file)
@@ -1673,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = {
 };
 #endif
 
+static bool virtnet_fail_on_feature(struct virtio_device *vdev,
+                                   unsigned int fbit,
+                                   const char *fname, const char *dname)
+{
+       if (!virtio_has_feature(vdev, fbit))
+               return false;
+
+       dev_err(&vdev->dev, "device advertises feature %s but not %s",
+               fname, dname);
+
+       return true;
+}
+
+#define VIRTNET_FAIL_ON(vdev, fbit, dbit)                      \
+       virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
+
+static bool virtnet_validate_features(struct virtio_device *vdev)
+{
+       if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
+           (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+                            "VIRTIO_NET_F_CTRL_VQ"))) {
+               return false;
+       }
+
+       return true;
+}
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
        int i, err;
@@ -1680,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        struct virtnet_info *vi;
        u16 max_queue_pairs;
 
+       if (!virtnet_validate_features(vdev))
+               return -EINVAL;
+
        /* Find if host supports multiqueue virtio_net device */
        err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
                                   struct virtio_net_config,
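
virtnet_validate_features() above rejects devices whose advertised feature set is internally inconsistent: VIRTIO_NET_F_CTRL_RX, _CTRL_VLAN, _GUEST_ANNOUNCE, _MQ and _CTRL_MAC_ADDR all depend on the control virtqueue, so none of them may appear without VIRTIO_NET_F_CTRL_VQ. The VIRTNET_FAIL_ON wrapper uses the preprocessor stringize operator so each log line names the offending bit without a hand-maintained string table; the same idiom in miniature:

#define REPORT_FEATURE(vdev, fbit) \
        dev_err(&(vdev)->dev, "unexpected feature %s (bit %d)\n", #fbit, (fbit))

        /* logs: "unexpected feature VIRTIO_NET_F_CTRL_RX (bit 18)" */
        REPORT_FEATURE(vdev, VIRTIO_NET_F_CTRL_RX);
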
index ca309820d39e1ba7995f38d3a2f9bacbd1c1f857..be4649a49c5e8bb2bec68aca08d27a82f5563d40 100644 (file)
 
 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
 
-/* VXLAN protocol header */
-struct vxlanhdr {
-       __be32 vx_flags;
-       __be32 vx_vni;
-};
-
 /* UDP port for VXLAN traffic.
  * The IANA assigned port is 4789, but the Linux default is 8472
  * for compatibility with early adopters.
@@ -275,13 +269,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
        return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+                                         sa_family_t family, __be16 port)
 {
        struct vxlan_sock *vs;
 
        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
-               if (inet_sk(vs->sock->sk)->inet_sport == port)
+               if (inet_sk(vs->sock->sk)->inet_sport == port &&
+                   inet_sk(vs->sock->sk)->sk.sk_family == family)
                        return vs;
        }
        return NULL;
@@ -300,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 }
 
 /* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+                                       sa_family_t family, __be16 port)
 {
        struct vxlan_sock *vs;
 
-       vs = vxlan_find_sock(net, port);
+       vs = vxlan_find_sock(net, family, port);
        if (!vs)
                return NULL;
 
@@ -621,6 +618,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
        int vxlan_len  = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
        int err = -ENOSYS;
 
+       udp_tunnel_gro_complete(skb, nhoff);
+
        eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
        type = eh->h_proto;
 
@@ -1771,7 +1770,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        ip_rt_put(rt);
-                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                                  dst->sa.sa_family, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1825,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
-                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                                  dst->sa.sa_family, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1986,15 @@ static int vxlan_init(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs;
+       bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+       vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+                            vxlan->dst_port);
        if (vs) {
                /* If we have a socket with same port already, reuse it */
                atomic_inc(&vs->refcnt);
@@ -2303,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
        if (ipv6) {
                udp_conf.family = AF_INET6;
                udp_conf.use_udp6_tx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
                udp_conf.use_udp6_rx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = INADDR_ANY;
@@ -2382,6 +2385,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
+       bool ipv6 = flags & VXLAN_F_IPV6;
 
        vs = vxlan_socket_create(net, port, rcv, data, flags);
        if (!IS_ERR(vs))
@@ -2391,7 +2395,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                return vs;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(net, port);
+       vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
        if (vs) {
                if (vs->rcv == rcv)
                        atomic_inc(&vs->refcnt);
@@ -2550,7 +2554,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-       if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+       if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+                          vxlan->dst_port)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
        }
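
Two independent fixes run through the vxlan hunks above. First, IPv4 and IPv6 VXLAN sockets may coexist on the same UDP port, so every socket and VNI lookup now keys on (netns, address family, port) rather than port alone, with callers deriving the family from the device flags:

        sa_family_t family = (vxlan->flags & VXLAN_F_IPV6) ? AF_INET6
                                                           : AF_INET;

        vs = vxlan_find_sock(vxlan->net, family, vxlan->dst_port);

Second, the "!!" to "!" change fixes inverted checksum configuration: the VXLAN_F_UDP_ZERO_CSUM6_* flags request zero (disabled) UDP6 checksums, so the use_udp6_*_checksums fields must be their negation.
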
index 697c4ae90af006f9c7f962bd21ea938af1892455..1e8ea5e4d4ca71ecd7598aea30d5a76f4749e85a 100644 (file)
@@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
                ah->enabled_cals |= TX_CL_CAL;
        else
                ah->enabled_cals &= ~TX_CL_CAL;
+
+       if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+               if (ah->is_clk_25mhz) {
+                       REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+                       REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+                       REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
+               } else {
+                       REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+                       REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+                       REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
+               }
+               udelay(100);
+       }
 }
 
 static void ar9003_hw_prog_ini(struct ath_hw *ah,
index 8be4b145339426b7ea8a47a4788c9dd6be04b238..2ad605760e2136584a56c25680cb14324fa3d30c 100644 (file)
@@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
        udelay(RTC_PLL_SETTLE_DELAY);
 
        REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
-
-       if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
-               if (ah->is_clk_25mhz) {
-                       REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
-                       REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
-                       REG_WRITE(ah,  AR_SLP32_INC, 0x0001e7ae);
-               } else {
-                       REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
-                       REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
-                       REG_WRITE(ah,  AR_SLP32_INC, 0x0001e800);
-               }
-               udelay(100);
-       }
 }
 
 static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
index 30c66dfcd7a04b8489186c82db741b87f0afbad0..4f18a6be0c7d706092998c15bb290b3f6ddcdbc1 100644 (file)
@@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
        struct ath_vif *avp;
 
        /*
-        * Pick the MAC address of the first interface as the new hardware
-        * MAC address. The hardware will use it together with the BSSID mask
-        * when matching addresses.
+        * The hardware will use primary station addr together with the
+        * BSSID mask when matching addresses.
         */
        memset(iter_data, 0, sizeof(*iter_data));
        memset(&iter_data->mask, 0xff, ETH_ALEN);
@@ -1205,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                list_add_tail(&avp->list, &avp->chanctx->vifs);
        }
 
+       ath9k_calculate_summary_state(sc, avp->chanctx);
+
        ath9k_assign_hw_queues(hw, vif);
 
        an->sc = sc;
@@ -1274,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath_tx_node_cleanup(sc, &avp->mcast_node);
 
+       ath9k_calculate_summary_state(sc, avp->chanctx);
+
        mutex_unlock(&sc->mutex);
 }
 
index 1dfc682a805513f5e5d160f6198f6d17bec644fc..ee27b06074e1213f68bb2e3f6bb632b61aeb5fd5 100644 (file)
@@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
 
 void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg)
 {
-       assert_mac_suspended(dev);
-       dev->phy.ops->phy_write(dev, destreg,
-               dev->phy.ops->phy_read(dev, srcreg));
+       b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg));
 }
 
 void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
index f05f5270fec1095df589b96490308bfe5193568a..927bffd5be6487125ca5f3e269b5eb0d690fb2f7 100644 (file)
@@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
                return;
 
        irq = irq_of_parse_and_map(np, 0);
-       if (irq < 0) {
-               brcmf_err("interrupt could not be mapped: err=%d\n", irq);
+       if (!irq) {
+               brcmf_err("interrupt could not be mapped\n");
                devm_kfree(dev, sdiodev->pdata);
                return;
        }
index 8c0632ec9f7a6041e72a0a3c6e6243a00955d97f..16fef3382019192d90462835625011075b33a2ec 100644 (file)
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
 #include <linux/delay.h>
-#include <linux/unaligned/access_ok.h>
 #include <linux/interrupt.h>
 #include <linux/bcma/bcma.h>
 #include <linux/sched.h>
+#include <asm/unaligned.h>
 
 #include <soc.h>
 #include <chipcommon.h>
index dc135915470d2cb70efab5480d11a036a0e00d3f..875d1142c8b0f5f65111a8674c7017a7e895721d 100644 (file)
@@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
                goto finalize;
        }
 
-       if (!brcmf_usb_ioctl_resp_wait(devinfo))
+       if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
+               usb_kill_urb(devinfo->ctl_urb);
                ret = -ETIMEDOUT;
-       else
+       } else {
                memcpy(buffer, tmpbuf, buflen);
+       }
 
 finalize:
        kfree(tmpbuf);
index 28fa25b509db8c3154d2ff0220d4b05c5f83c99d..39b45c038a93ec2a538665bbe90498230493ec42 100644 (file)
@@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
        primary_offset = ch->center_freq1 - ch->chan->center_freq;
        switch (ch->width) {
        case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_20_NOHT:
                ch_inf.bw = BRCMU_CHAN_BW_20;
                WARN_ON(primary_offset != 0);
                break;
@@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
                                ch_inf.sb = BRCMU_CHAN_SB_LU;
                }
                break;
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_160:
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
        default:
                WARN_ON_ONCE(1);
        }
@@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
        case IEEE80211_BAND_5GHZ:
                ch_inf.band = BRCMU_CHAN_BAND_5G;
                break;
+       case IEEE80211_BAND_60GHZ:
        default:
                WARN_ON_ONCE(1);
        }
index 4f6e66892acc4658473aed57fc4c6cc72dd33fc4..b894a84e8393062a113102c8bbe96cf4b28f4f24 100644 (file)
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *     which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = BIT(10),
        IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = BIT(11),
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
+       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
 };
 
 /* The default calibrate table size if not specified by firmware file */
index e0d9f19650b07e5df8b56342ec5e6d1bef88194f..eb03943f846326207061a2ed63e95df09af93caf 100644 (file)
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (WARN_ON_ONCE(mvm->init_ucode_complete))
+       if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
                return 0;
 
        iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto out;
        }
 
+       mvm->calibrating = true;
+
        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
        if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                        MVM_UCODE_CALIB_TIMEOUT);
        if (!ret)
                mvm->init_ucode_complete = true;
+
+       if (ret && iwl_mvm_is_radio_killed(mvm)) {
+               IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+               ret = 1;
+       }
        goto out;
 
 error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
+       mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
index 585fe5b7100fb8750bed29eda65d91121422497c..b6d2683da3a96dab9c53d0c6f291ab69de6e9e8f 100644 (file)
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        mvm->scan_status = IWL_MVM_SCAN_NONE;
        mvm->ps_disabled = false;
+       mvm->calibrating = false;
 
        /* just in case one was running */
        ieee80211_remain_on_channel_expired(mvm->hw);
@@ -2447,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* Use aux roc framework (HS20) */
-               ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
-                                              vif, duration);
+               if (mvm->fw->ucode_capa.capa[0] &
+                   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+                       /* Use aux roc framework (HS20) */
+                       ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+                                                      vif, duration);
+                       goto out_unlock;
+               }
+               IWL_ERR(mvm, "hotspot not supported\n");
+               ret = -EINVAL;
                goto out_unlock;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* handle below */
index b153ced7015bfef8984a9ecb78f844d4533ed7d9..845429c88cf403fdaffee73951fa99251b619137 100644 (file)
@@ -548,6 +548,7 @@ struct iwl_mvm {
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
        bool init_ucode_complete;
+       bool calibrating;
        u32 error_event_table;
        u32 log_event_table;
        u32 umac_error_event_table;
index 48cb25a93591a44678fd051e390dc15c02078744..5b719ee8e789075e6e7517512ca311cb6f9b6121 100644 (file)
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        }
        mvm->sf_state = SF_UNINIT;
        mvm->low_latency_agg_frame_limit = 6;
+       mvm->cur_ucode = IWL_UCODE_INIT;
 
        mutex_init(&mvm->mutex);
        mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       bool calibrating = ACCESS_ONCE(mvm->calibrating);
 
        if (state)
                set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 
        wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 
-       return state && mvm->cur_ucode != IWL_UCODE_INIT;
+       /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+       if (calibrating)
+               iwl_abort_notification_waits(&mvm->notif_wait);
+
+       /*
+        * Stop the device if we run OPERATIONAL firmware or if we are in the
+        * middle of the calibrations.
+        */
+       return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
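
The iwlwifi hunks above (fw.c, mac80211.c, mvm.h, ops.c) together handle rfkill arriving mid-calibration: mvm->calibrating marks the window in which iwl_run_init_mvm_ucode() blocks waiting for calibration results, and the rfkill notifier aborts that wait and forces a device stop while the flag is set. A condensed sketch of the flag's lifecycle, simplified from the code above:

        /* init path */
        mvm->calibrating = true;        /* entering the blocking wait */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                                    MVM_UCODE_CALIB_TIMEOUT);
        /* ... success and error paths both fall through ... */
        mvm->calibrating = false;       /* always cleared before return */

        /* rfkill notifier, running concurrently */
        bool calibrating = ACCESS_ONCE(mvm->calibrating);  /* one untorn read */

        if (calibrating)
                iwl_abort_notification_waits(&mvm->notif_wait);
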
index b280d5d87127e87ea80f1eb36a477dad3dfcc6e2..7554f705383063fa3c7c3610595e9e39f0cc9af8 100644 (file)
@@ -602,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
                                               SCAN_COMPLETE_NOTIFICATION };
        int ret;
 
-       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               return 0;
-
-       if (iwl_mvm_is_radio_killed(mvm)) {
-               ieee80211_scan_completed(mvm->hw, true);
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               return 0;
-       }
-
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
                                   scan_abort_notif,
                                   ARRAY_SIZE(scan_abort_notif),
@@ -1400,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
 int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
 {
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return 0;
+
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               ieee80211_scan_completed(mvm->hw, true);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               return 0;
+       }
+
        if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
                return iwl_mvm_scan_offload_stop(mvm, true);
        return iwl_mvm_cancel_regular_scan(mvm);
index 3781b029e54a328f6304bfb6cf61d52c053cbce7..dd2f3f8baa9dbea639005b3b65dbbbda9109408f 100644 (file)
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         * restart. So don't process again if the device is
         * already dead.
         */
-       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+       if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+               IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);
 
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
-       clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);
        clear_bit(STATUS_RFKILL, &trans->status);
 
@@ -1894,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
                int reg;
                __le32 *val;
 
-               prph_len += sizeof(*data) + sizeof(*prph) +
-                       num_bytes_in_chunk;
+               prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
 
                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
                (*data)->len = cpu_to_le32(sizeof(*prph) +
index babbdc1ce741c62024bc6220fdcf2a7bd3487790..c9ad4cf1adfb4b1d7a30068c2546ac7354345139 100644 (file)
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        if (err != 0) {
                printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
                       err);
-               goto failed_hw;
+               goto failed_bind;
        }
 
        skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        return idx;
 
 failed_hw:
+       device_release_driver(data->dev);
+failed_bind:
        device_unregister(data->dev);
 failed_drvdata:
        ieee80211_free_hw(hw);
index 8e68f87ab13c3081f062acc69fb71f59601e836f..66ff36447b9473799e27899bf44c3c3c8eb3994b 100644 (file)
@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
        skb_trim(skb, frame_length);
 }
 
-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
+/*
+ * H/W needs L2 padding between the header and the payload if header size
+ * is not 4 bytes aligned.
+ */
+void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 {
-       unsigned int payload_length = skb->len - header_length;
-       unsigned int header_align = ALIGN_SIZE(skb, 0);
-       unsigned int payload_align = ALIGN_SIZE(skb, header_length);
-       unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
+       unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 
-       /*
-        * Adjust the header alignment if the payload needs to be moved more
-        * than the header.
-        */
-       if (payload_align > header_align)
-               header_align += 4;
-
-       /* There is nothing to do if no alignment is needed */
-       if (!header_align)
+       if (!l2pad)
                return;
 
-       /* Reserve the amount of space needed in front of the frame */
-       skb_push(skb, header_align);
-
-       /*
-        * Move the header.
-        */
-       memmove(skb->data, skb->data + header_align, header_length);
-
-       /* Move the payload, if present and if required */
-       if (payload_length && payload_align)
-               memmove(skb->data + header_length + l2pad,
-                       skb->data + header_length + l2pad + payload_align,
-                       payload_length);
-
-       /* Trim the skb to the correct size */
-       skb_trim(skb, header_length + l2pad + payload_length);
+       skb_push(skb, l2pad);
+       memmove(skb->data, skb->data + l2pad, hdr_len);
 }
 
-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
+void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 {
-       /*
-        * L2 padding is only present if the skb contains more than just the
-        * IEEE 802.11 header.
-        */
-       unsigned int l2pad = (skb->len > header_length) ?
-                               L2PAD_SIZE(header_length) : 0;
+       unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 
        if (!l2pad)
                return;
 
-       memmove(skb->data + l2pad, skb->data, header_length);
+       memmove(skb->data + l2pad, skb->data, hdr_len);
        skb_pull(skb, l2pad);
 }
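
The rt2x00 simplification above works because the pad size depends only on the header length: L2PAD_SIZE(hdr_len), defined in rt2x00queue.h as the distance to the next 4-byte boundary, so inserting the pad reduces to one skb_push() plus one memmove of the header. For example:

#define L2PAD_SIZE(__hdrlen)    (-(__hdrlen) & 3)       /* pad to 4-byte boundary */

        /* A 26-byte 802.11 QoS data header: (-26) & 3 == 2, so two pad
         * bytes sit between header and payload (payload starts at 28). */
        unsigned int l2pad = L2PAD_SIZE(26);
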
 
index 25daa8715219c6b89f110bb048cace12e3414159..846a2e6e34d855d62726eda65b51ee427bc1a939 100644 (file)
@@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        break;
                }
                /* handle command packet here */
-               if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
+               if (rtlpriv->cfg->ops->rx_command_packet &&
+                   rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
                                dev_kfree_skb_any(skb);
                                goto end;
                }
@@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 
        __skb_queue_tail(&ring->queue, pskb);
 
-       rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
-                                   &temp_one);
-
+       if (rtlpriv->use_new_trx_flow) {
+               temp_one = 4;
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true,
+                                           HW_DESC_OWN, (u8 *)&temp_one);
+       } else {
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
+                                           &temp_one);
+       }
        return;
 }
 
@@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
        ring->desc = NULL;
        if (rtlpriv->use_new_trx_flow) {
                pci_free_consistent(rtlpci->pdev,
-                                   sizeof(*ring->desc) * ring->entries,
+                                   sizeof(*ring->buffer_desc) * ring->entries,
                                    ring->buffer_desc, ring->buffer_desc_dma);
-               ring->desc = NULL;
+               ring->buffer_desc = NULL;
        }
 }
 
@@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
                                                         true,
                                                         HW_DESC_TXBUFF_ADDR),
                                                 skb->len, PCI_DMA_TODEVICE);
-                               ring->idx = (ring->idx + 1) % ring->entries;
                                kfree_skb(skb);
                                ring->idx = (ring->idx + 1) % ring->entries;
                        }
@@ -2244,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
        /*like read eeprom and so on */
        rtlpriv->cfg->ops->read_eeprom_info(hw);
 
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               err = -ENODEV;
+               goto fail3;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
+
+       /*aspm */
+       rtl_pci_init_aspm(hw);
+
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
@@ -2259,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
                goto fail3;
        }
 
-       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-               err = -ENODEV;
-               goto fail3;
-       }
-       rtlpriv->cfg->ops->init_sw_leds(hw);
-
-       /*aspm */
-       rtl_pci_init_aspm(hw);
-
        err = ieee80211_register_hw(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 00e067044c08d0a9cafa9009466eb22c21965d56..5761d5b49e39e4e9cb2ce703f39578ad1cf99e74 100644 (file)
@@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
 
        }
 
+       if (type != NL80211_IFTYPE_AP &&
+           rtlpriv->mac80211.link_state < MAC80211_LINKED)
+               bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
 
        temp = rtl_read_dword(rtlpriv, TCR);
@@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
        rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]);
        /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */
        rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F);
+       rtlpci->irq_enabled = true;
 }
 
 void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
@@ -1276,8 +1280,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
        rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        rtl_write_dword(rtlpriv, INTA_MASK, 0);
        rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
-
-       synchronize_irq(rtlpci->pdev->irq);
+       rtlpci->irq_enabled = false;
 }
 
 static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
index 77c5b5f352441a3a2a2d0c26b5627266aa15d5d9..4b4612fe2fdbdf318bad0b0a6e36873a6340d60b 100644 (file)
@@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                case 2:
                        currentcmd = &postcommoncmd[*step];
                        break;
+               default:
+                       return true;
                }
 
                if (currentcmd->cmdid == CMDID_END) {
index aadba29c167aff4c8ddf38db610db3cdfcbfcd93..fb003868bdef7ed9bbb775042a380190ce68162d 100644 (file)
@@ -236,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw)
        }
 }
 
+static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
+                                     u16 index)
+{
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+       u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+       u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN);
+
+       if (own)
+               return false;
+       return true;
+}
+
 static struct rtl_hal_ops rtl8192se_hal_ops = {
        .init_sw_vars = rtl92s_init_sw_vars,
        .deinit_sw_vars = rtl92s_deinit_sw_vars,
@@ -269,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
        .led_control = rtl92se_led_control,
        .set_desc = rtl92se_set_desc,
        .get_desc = rtl92se_get_desc,
+       .is_tx_desc_closed = rtl92se_is_tx_desc_closed,
        .tx_polling = rtl92se_tx_polling,
        .enable_hw_sec = rtl92se_enable_hw_security_config,
        .set_key = rtl92se_set_key,
@@ -306,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
        .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
        .maps[MAC_RCR_ACF] = RCR_ACF,
        .maps[MAC_RCR_AAP] = RCR_AAP,
+       .maps[MAC_HIMR] = INTA_MASK,
+       .maps[MAC_HIMRE] = INTA_MASK + 4,
 
        .maps[EFUSE_TEST] = REG_EFUSE_TEST,
        .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
index 310d3163dc5b6a3f1e51a9a59ed999fe991a2f06..8ec8200002c7311025b3ae645c9956bf08c44a17 100644 (file)
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
                mac->opmode == NL80211_IFTYPE_ADHOC)
                macid = sta->aid + 1;
        if (wirelessmode == WIRELESS_MODE_N_5G ||
-           wirelessmode == WIRELESS_MODE_AC_5G)
-               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ];
+           wirelessmode == WIRELESS_MODE_AC_5G ||
+           wirelessmode == WIRELESS_MODE_A)
+               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
        else
                ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
 
index 4e56a27f9689a925ff3cf26118c6ee505eb963a4..fab0d4b42f58fca511dc447b62ea91607732ba81 100644 (file)
@@ -39,7 +39,7 @@ struct backend_info {
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
-static void backend_create_xenvif(struct backend_info *be);
+static int backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
        be->state = XenbusStateInitWait;
 
        /* This kicks hotplug scripts, so do it immediately. */
-       backend_create_xenvif(be);
+       err = backend_create_xenvif(be);
+       if (err)
+               goto fail;
 
        return 0;
 
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
 }
 
 
-static void backend_create_xenvif(struct backend_info *be)
+static int backend_create_xenvif(struct backend_info *be)
 {
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
 
        if (be->vif != NULL)
-               return;
+               return 0;
 
        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
-               return;
+               return (err < 0) ? err : -EINVAL;
        }
 
        be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
                err = PTR_ERR(be->vif);
                be->vif = NULL;
                xenbus_dev_fatal(dev, err, "creating interface");
-               return;
+               return err;
        }
 
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+       return 0;
 }
 
 static void backend_disconnect(struct backend_info *be)
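
backend_create_xenvif() now reports failure so netback_probe() can bail out instead of leaving a half-initialized backend behind. Worth noting is the xenbus_scanf() translation: it returns the number of values parsed, or a negative errno, so exactly 1 means success here and a short non-negative count is normalized to -EINVAL:

        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
                return (err < 0) ? err : -EINVAL;       /* normalize to errno */
        }
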
index cca871346a0ff5cfe22ffbcdce0f8c973cf59214..ece8d1804d13606c71b8f39e90851cb8b82de8ac 100644 (file)
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
                len = skb_frag_size(frag);
                offset = frag->page_offset;
 
-               /* Data must not cross a page boundary. */
-               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
                /* Skip unused frames from start of page */
                page += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
                while (len > 0) {
                        unsigned long bytes;
 
-                       BUG_ON(offset >= PAGE_SIZE);
-
                        bytes = PAGE_SIZE - offset;
                        if (bytes > len)
                                bytes = len;
index afdb78299f61f8d5465ec1d203d482d0674fe31d..06af494184d657456df3c353b9e61e380ec98dec 100644 (file)
@@ -450,6 +450,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
        return NULL;
 }
 
+static int of_empty_ranges_quirk(void)
+{
+       if (IS_ENABLED(CONFIG_PPC)) {
+               /* To save cycles, we cache the result */
+               static int quirk_state = -1;
+
+               if (quirk_state < 0)
+                       quirk_state =
+                               of_machine_is_compatible("Power Macintosh") ||
+                               of_machine_is_compatible("MacRISC");
+               return quirk_state;
+       }
+       return false;
+}
+
 static int of_translate_one(struct device_node *parent, struct of_bus *bus,
                            struct of_bus *pbus, __be32 *addr,
                            int na, int ns, int pna, const char *rprop)
@@ -475,12 +490,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
         * This code is only enabled on powerpc. --gcl
         */
        ranges = of_get_property(parent, rprop, &rlen);
-#if !defined(CONFIG_PPC)
-       if (ranges == NULL) {
+       if (ranges == NULL && !of_empty_ranges_quirk()) {
                pr_err("OF: no ranges; cannot translate\n");
                return 1;
        }
-#endif /* !defined(CONFIG_PPC) */
        if (ranges == NULL || rlen == 0) {
                offset = of_read_number(addr, na);
                memset(addr, 0, pna * 4);
index 2305dc0382bca0a5c86bc89f5335253de46ff109..3823edf2d0120d9e89a8195f4845c7d144bcefcf 100644 (file)
@@ -1279,52 +1279,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
 }
 EXPORT_SYMBOL_GPL(of_property_read_string);
 
-/**
- * of_property_read_string_index - Find and read a string from a multiple
- * strings property.
- * @np:                device node from which the property value is to be read.
- * @propname:  name of the property to be searched.
- * @index:     index of the string in the list of strings
- * @out_string:        pointer to null terminated return string, modified only if
- *             return value is 0.
- *
- * Search for a property in a device tree node and retrieve a null
- * terminated string value (pointer to data, not a copy) in the list of strings
- * contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
- * property does not have a value, and -EILSEQ if the string is not
- * null-terminated within the length of the property data.
- *
- * The out_string pointer is modified only if a valid string can be decoded.
- */
-int of_property_read_string_index(struct device_node *np, const char *propname,
-                                 int index, const char **output)
-{
-       struct property *prop = of_find_property(np, propname, NULL);
-       int i = 0;
-       size_t l = 0, total = 0;
-       const char *p;
-
-       if (!prop)
-               return -EINVAL;
-       if (!prop->value)
-               return -ENODATA;
-       if (strnlen(prop->value, prop->length) >= prop->length)
-               return -EILSEQ;
-
-       p = prop->value;
-
-       for (i = 0; total < prop->length; total += l, p += l) {
-               l = strlen(p) + 1;
-               if (i++ == index) {
-                       *output = p;
-                       return 0;
-               }
-       }
-       return -ENODATA;
-}
-EXPORT_SYMBOL_GPL(of_property_read_string_index);
-
 /**
  * of_property_match_string() - Find string in a list and return index
  * @np: pointer to node containing string list property
@@ -1351,7 +1305,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
        end = p + prop->length;
 
        for (i = 0; p < end; i++, p += l) {
-               l = strlen(p) + 1;
+               l = strnlen(p, end - p) + 1;
                if (p + l > end)
                        return -EILSEQ;
                pr_debug("comparing %s with %s\n", string, p);
@@ -1363,39 +1317,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
 EXPORT_SYMBOL_GPL(of_property_match_string);
 
 /**
- * of_property_count_strings - Find and return the number of strings from a
- * multiple strings property.
+ * of_property_read_string_util() - Utility helper for parsing string properties
  * @np:                device node from which the property value is to be read.
  * @propname:  name of the property to be searched.
+ * @out_strs:  output array of string pointers.
+ * @sz:                number of array elements to read.
+ * @skip:      Number of strings to skip over at beginning of list.
  *
- * Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * Don't call this function directly. It is a utility helper for the
+ * of_property_read_string*() family of functions.
  */
-int of_property_count_strings(struct device_node *np, const char *propname)
+int of_property_read_string_helper(struct device_node *np, const char *propname,
+                                  const char **out_strs, size_t sz, int skip)
 {
        struct property *prop = of_find_property(np, propname, NULL);
-       int i = 0;
-       size_t l = 0, total = 0;
-       const char *p;
+       int l = 0, i = 0;
+       const char *p, *end;
 
        if (!prop)
                return -EINVAL;
        if (!prop->value)
                return -ENODATA;
-       if (strnlen(prop->value, prop->length) >= prop->length)
-               return -EILSEQ;
-
        p = prop->value;
+       end = p + prop->length;
 
-       for (i = 0; total < prop->length; total += l, p += l, i++)
-               l = strlen(p) + 1;
-
-       return i;
+       for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
+               l = strnlen(p, end - p) + 1;
+               if (p + l > end)
+                       return -EILSEQ;
+               if (out_strs && i >= skip)
+                       *out_strs++ = p;
+       }
+       i -= skip;
+       return i <= 0 ? -ENODATA : i;
 }
-EXPORT_SYMBOL_GPL(of_property_count_strings);
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);
 
 void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
 {
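For context: the of_property_read_string_index() and of_property_count_strings() bodies removed above are folded into this one helper, and the public of_property_read_string*() calls presumably become thin static inlines in the matching header. A sketch under that assumption (the diff itself only shows the helper):

    static inline int of_property_count_strings(struct device_node *np,
                                                const char *propname)
    {
            /* out_strs == NULL: walk the whole list and return the count */
            return of_property_read_string_helper(np, propname, NULL, 0, 0);
    }

    static inline int of_property_read_string_array(struct device_node *np,
                                                    const char *propname,
                                                    const char **out_strs,
                                                    size_t sz)
    {
            /* fill up to sz pointers, return how many were read */
            return of_property_read_string_helper(np, propname, out_strs, sz, 0);
    }

    static inline int of_property_read_string_index(struct device_node *np,
                                                    const char *propname,
                                                    int index, const char **output)
    {
            /* skip 'index' strings, then read exactly one */
            int rc = of_property_read_string_helper(np, propname, output, 1, index);

            return rc < 0 ? rc : 0;
    }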
index f297891d852908ae3e78fe9bfa2bfe5b231875e6..d4994177dec25506e454bf1556050ffc2754d084 100644 (file)
@@ -247,7 +247,7 @@ void of_node_release(struct kobject *kobj)
  * @allocflags:        Allocation flags (typically pass GFP_KERNEL)
  *
  * Copy a property by dynamically allocating the memory of both the
- * property stucture and the property name & contents. The property's
+ * property structure and the property name & contents. The property's
  * flags have the OF_DYNAMIC bit set so that we can differentiate between
  * dynamically allocated properties and not.
  * Returns the newly allocated property or NULL on out of memory error.
index d1ffca8b34eac51853c812a230cfd438862f8cff..d134710de96dabcf873ed8ea135611784c60b1d0 100644 (file)
@@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void)
        if (offset < 0)
                return -ENODEV;
 
-       while (match->compatible) {
+       while (match->compatible[0]) {
                unsigned long addr;
                if (fdt_node_check_compatible(fdt, offset, match->compatible)) {
                        match++;
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
                                        phys_addr_t size, bool nomap)
 {
-       if (memblock_is_region_reserved(base, size))
-               return -EBUSY;
        if (nomap)
                return memblock_remove(base, size);
        return memblock_reserve(base, size);
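Dropping the memblock_is_region_reserved() bail-out relies on memblock_reserve() coping with overlap itself: overlapping ranges are merged rather than rejected, so a region that is already partially reserved no longer fails the whole reservation with -EBUSY. A hypothetical illustration:

    /* Sketch: both calls succeed; memblock merges the overlapping ranges. */
    memblock_reserve(base, size);
    memblock_reserve(base + size / 2, size);    /* overlaps the first; returns 0 */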
index 78001270a5980bc7342a5241c8930297cf92f883..e2d79afa9dc6153613c4281570eb620332249c8c 100644 (file)
@@ -339,8 +339,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
        selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
 }
 
-static void __init of_selftest_property_match_string(void)
+static void __init of_selftest_property_string(void)
 {
+       const char *strings[4];
        struct device_node *np;
        int rc;
 
@@ -357,13 +358,66 @@ static void __init of_selftest_property_match_string(void)
        rc = of_property_match_string(np, "phandle-list-names", "third");
        selftest(rc == 2, "third expected:2 got:%i\n", rc);
        rc = of_property_match_string(np, "phandle-list-names", "fourth");
-       selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
+       selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
        rc = of_property_match_string(np, "missing-property", "blah");
-       selftest(rc == -EINVAL, "missing property; rc=%i", rc);
+       selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
        rc = of_property_match_string(np, "empty-property", "blah");
-       selftest(rc == -ENODATA, "empty property; rc=%i", rc);
+       selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
        rc = of_property_match_string(np, "unterminated-string", "blah");
-       selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+
+       /* of_property_count_strings() tests */
+       rc = of_property_count_strings(np, "string-property");
+       selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "phandle-list-names");
+       selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "unterminated-string");
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+       rc = of_property_count_strings(np, "unterminated-string-list");
+       selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+
+       /* of_property_read_string_index() tests */
+       rc = of_property_read_string_index(np, "string-property", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "string-property", 1, strings);
+       selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
+       selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
+       selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
+       selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[0] = NULL;
+       rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
+       selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+       strings[1] = NULL;
+
+       /* of_property_read_string_array() tests */
+       rc = of_property_read_string_array(np, "string-property", strings, 4);
+       selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
+       selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+       rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
+       selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+       /* -- An incorrectly formed string should cause a failure */
+       rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
+       selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+       /* -- parsing the correctly formed strings should still work: */
+       strings[2] = NULL;
+       rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
+       selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
+       strings[1] = NULL;
+       rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
+       selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
 }
 
 #define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
@@ -842,10 +896,14 @@ static void selftest_data_remove(void)
                return;
        }
 
-       while (last_node_index >= 0) {
+       while (last_node_index-- > 0) {
                if (nodes[last_node_index]) {
                        np = of_find_node_by_path(nodes[last_node_index]->full_name);
-                       if (strcmp(np->full_name, "/aliases") != 0) {
+                       if (np == nodes[last_node_index]) {
+                               if (of_aliases == np) {
+                                       of_node_put(of_aliases);
+                                       of_aliases = NULL;
+                               }
                                detach_node_and_children(np);
                        } else {
                                for_each_property_of_node(np, prop) {
@@ -854,7 +912,6 @@ static void selftest_data_remove(void)
                                }
                        }
                }
-               last_node_index--;
        }
 }
 
@@ -867,6 +924,8 @@ static int __init of_selftest(void)
        res = selftest_data_add();
        if (res)
                return res;
+       if (!of_aliases)
+               of_aliases = of_find_node_by_path("/aliases");
 
        np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
        if (!np) {
@@ -881,7 +940,7 @@ static int __init of_selftest(void)
        of_selftest_find_node_by_name();
        of_selftest_dynamic();
        of_selftest_parse_phandle_with_args();
-       of_selftest_property_match_string();
+       of_selftest_property_string();
        of_selftest_property_copy();
        of_selftest_changeset();
        of_selftest_parse_interrupts();
index ce0fe083d4062eec02145105b44691741337667d..5b1527e8a7fb3ae471a359aed37daa81bd44c0ce 100644 (file)
@@ -39,7 +39,9 @@
                                phandle-list-bad-args = <&provider2 1 0>,
                                                        <&provider3 0>;
                                empty-property;
+                               string-property = "foobar";
                                unterminated-string = [40 41 42 43];
+                               unterminated-string-list = "first", "second", [40 41 42 43];
                        };
                };
        };
index d292d7cb3417062643379805a969f5d29718de2f..49dd766852ba58f5ebad21199e38f1e2a912b498 100644 (file)
@@ -444,7 +444,7 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
 }
 
-static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
+bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
 {
        int type = pci_pcie_type(dev);
 
index 3d43874319bebb13999889522f8d85e61b7251a2..19bb19c7db4a77eb649ddc3c9fc77b95a1f7c479 100644 (file)
@@ -276,6 +276,7 @@ struct tegra_pcie {
 
        struct resource all;
        struct resource io;
+       struct resource pio;
        struct resource mem;
        struct resource prefetch;
        struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
        struct tegra_pcie *pcie = sys_to_pcie(sys);
        int err;
-       phys_addr_t io_start;
 
        err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
        if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
        if (err)
                return err;
 
-       io_start = pci_pio_to_address(pcie->io.start);
-
        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pcie->prefetch,
                                sys->mem_offset);
        pci_add_resource(&sys->resources, &pcie->busn);
 
-       pci_ioremap_io(nr * SZ_64K, io_start);
+       pci_ioremap_io(pcie->pio.start, pcie->io.start);
 
        return 1;
 }
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
        u32 fpci_bar, size, axi_address;
-       phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
 
        /* Bar 0: type 1 extended configuration space */
        fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
        /* Bar 1: downstream IO bar */
        fpci_bar = 0xfdfc0000;
        size = resource_size(&pcie->io);
-       axi_address = io_start;
+       axi_address = pcie->io.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 
                switch (res.flags & IORESOURCE_TYPE_BITS) {
                case IORESOURCE_IO:
-                       memcpy(&pcie->io, &res, sizeof(res));
-                       pcie->io.name = np->full_name;
+                       memcpy(&pcie->pio, &res, sizeof(res));
+                       pcie->pio.name = np->full_name;
+
+                       /*
+                        * The Tegra PCIe host bridge uses this to program the
+                        * mapping of the I/O space to the physical address,
+                        * so here we override the .start and .end fields that
+                        * of_pci_range_to_resource() converted to I/O space.
+                        * We also set the IORESOURCE_MEM type to clarify that
+                        * the resource is in the physical memory space.
+                        */
+                       pcie->io.start = range.cpu_addr;
+                       pcie->io.end = range.cpu_addr + range.size - 1;
+                       pcie->io.flags = IORESOURCE_MEM;
+                       pcie->io.name = "I/O";
+
+                       memcpy(&res, &pcie->io, sizeof(res));
                        break;
 
                case IORESOURCE_MEM:
index 9ecabfa8c6343a83afba9d33b101bc4161b6bd3f..2988fe136c1e3ede0240e90db80d6e4e013e6e8a 100644 (file)
@@ -631,10 +631,15 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res);
+       bus = pci_create_root_bus(&pdev->dev, 0,
+                                       &xgene_pcie_ops, port, &res);
        if (!bus)
                return -ENOMEM;
 
+       pci_scan_child_bus(bus);
+       pci_assign_unassigned_bus_resources(bus);
+       pci_bus_add_devices(bus);
+
        platform_set_drvdata(pdev, port);
        return 0;
 }
index 9fab30af0e75abdcec135707363951d7e9e26f8c..084587d7cd134ce0e8e20410368f5b60b9e88f74 100644 (file)
@@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
        return entry;
 }
 
+static int msi_verify_entries(struct pci_dev *dev)
+{
+       struct msi_desc *entry;
+
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               if (!dev->no_64bit_msi || !entry->msg.address_hi)
+                       continue;
+               dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
+                       " tried to assign one above 4G\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
                return ret;
        }
 
+       ret = msi_verify_entries(dev);
+       if (ret) {
+               msi_mask_irq(entry, mask, ~mask);
+               free_msi_irqs(dev);
+               return ret;
+       }
+
        ret = populate_msi_sysfs(dev);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
@@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev,
        if (ret)
                goto out_avail;
 
+       /* Check if all MSI entries honor device restrictions */
+       ret = msi_verify_entries(dev);
+       if (ret)
+               goto out_free;
+
        /*
         * Some devices require MSI-X to be enabled before we can touch the
         * MSI-X registers.  We need to mask all the vectors to prevent
index 0601890db22de96131af2d67fd872651ab608133..4a3902d8e6fec7984bb483b55aee4115101cc1f4 100644 (file)
@@ -6,6 +6,8 @@
 
 extern const unsigned char pcie_link_speed[];
 
+bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+
 /* Functions internal to the PCI core code */
 
 int pci_create_sysfs_dev_files(struct pci_dev *pdev);
index 5ed99309c75800938b666c5c523d7ea3ca07d51f..c8ca98c2b480a41d57676ef21e97cd6612134110 100644 (file)
@@ -407,15 +407,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
 {
        struct pci_dev *dev = child->self;
        u16 mem_base_lo, mem_limit_lo;
-       unsigned long base, limit;
+       u64 base64, limit64;
+       dma_addr_t base, limit;
        struct pci_bus_region region;
        struct resource *res;
 
        res = child->resource[2];
        pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
        pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
-       base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
-       limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+       base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+       limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
 
        if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
                u32 mem_base_hi, mem_limit_hi;
@@ -429,17 +430,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
                 * this, just assume they are not being used.
                 */
                if (mem_base_hi <= mem_limit_hi) {
-#if BITS_PER_LONG == 64
-                       base |= ((unsigned long) mem_base_hi) << 32;
-                       limit |= ((unsigned long) mem_limit_hi) << 32;
-#else
-                       if (mem_base_hi || mem_limit_hi) {
-                               dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
-                               return;
-                       }
-#endif
+                       base64 |= (u64) mem_base_hi << 32;
+                       limit64 |= (u64) mem_limit_hi << 32;
                }
        }
+
+       base = (dma_addr_t) base64;
+       limit = (dma_addr_t) limit64;
+
+       if (base != base64) {
+               dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
+                       (unsigned long long) base64);
+               return;
+       }
+
        if (base <= limit) {
                res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
                                         IORESOURCE_MEM | IORESOURCE_PREFETCH;
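The base != base64 comparison above is the usual narrowing check: assign the 64-bit value to the narrower dma_addr_t, then compare back; on a configuration with a 32-bit dma_addr_t, a window above 4GB loses bits in the round trip and the bridge window is rejected with an error instead of being silently truncated. A stand-alone sketch, assuming a 32-bit narrow type:

    #include <stdint.h>

    /* Returns 1 if wide survives the round trip through the narrow type. */
    static int fits_in_32bit(uint64_t wide)
    {
            uint32_t narrow = (uint32_t)wide;   /* truncates anything above 4GB */

            return narrow == wide;              /* narrow promotes back to 64-bit */
    }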
@@ -1323,7 +1327,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
                        ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
 
        /* Initialize Link Control Register */
-       if (dev->subordinate)
+       if (pcie_cap_has_lnkctl(dev))
                pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                        ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
 
index 8c842980834a2cebb40a77d31bc30f5f6ed643ae..f091576b644903a2ab1320550c090479a6f8d56c 100644 (file)
@@ -258,14 +258,16 @@ static int omap_usb2_probe(struct platform_device *pdev)
        otg->phy                = &phy->phy;
 
        platform_set_drvdata(pdev, phy);
+       pm_runtime_enable(phy->dev);
 
        generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
-       if (IS_ERR(generic_phy))
+       if (IS_ERR(generic_phy)) {
+               pm_runtime_disable(phy->dev);
                return PTR_ERR(generic_phy);
+       }
 
        phy_set_drvdata(generic_phy, phy);
 
-       pm_runtime_enable(phy->dev);
        phy_provider = devm_of_phy_provider_register(phy->dev,
                        of_phy_simple_xlate);
        if (IS_ERR(phy_provider)) {
index e12e5b07f6d751aba9cce63cf49d2acabe50b7ab..9dc38140194bf8a8f6e4765ea9c790e112beaa31 100644 (file)
@@ -227,10 +227,14 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
        spin_lock_irqsave(&vg->lock, flags);
        value = readl(reg);
 
+       WARN(value & BYT_DIRECT_IRQ_EN,
+               "Bad pad config for io mode, force direct_irq_en bit clearing");
+
        /* For level triggers the BYT_TRIG_POS and BYT_TRIG_NEG bits
         * are used to indicate high and low level triggering
         */
-       value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+       value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
+                  BYT_TRIG_LVL);
 
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
@@ -318,7 +322,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
                "Potential Error: Setting GPIO with direct_irq_en to output");
 
        reg_val = readl(reg) | BYT_DIR_MASK;
-       reg_val &= ~BYT_OUTPUT_EN;
+       reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
 
        if (value)
                writel(reg_val | BYT_LEVEL, reg);
index 4dcfb7116a0487656bf489a45c6345c18ea1e219..a2eabe6ff9ada9b7bff3991a0116c8137a78b92d 100644 (file)
@@ -202,6 +202,7 @@ config TC1100_WMI
 config HP_ACCEL
        tristate "HP laptop accelerometer"
        depends on INPUT && ACPI
+       depends on SERIO_I8042
        select SENSORS_LIS3LV02D
        select NEW_LEDS
        select LEDS_CLASS
index 96a0b75c52c9a6232c99b18e61751f3eb62e940a..26c4fd1394da553f465612d096fe30316784b7f1 100644 (file)
@@ -579,6 +579,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
                },
        },
+       {
+               /*
+                * Note no video_set_backlight_video_vendor, we must use the
+                * acer interface, as there is no native backlight interface.
+                */
+               .ident = "Acer KAV80",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
+               },
+       },
        {}
 };
 
index 3a4951f46065dc95abf7767572426566027e1381..c1a6cd66af421753668c0746a78c0870048ff7f9 100644 (file)
@@ -180,6 +180,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_wapf4,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X550VB",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"),
+               },
+               .driver_data = &quirk_asus_wapf4,
+       },
        {
                .callback = dmi_matched,
                .ident = "ASUSTeK COMPUTER INC. X55A",
index 13e14ec1d3d7118c4e4ab63ac98a2ef0c1258118..6bec745b6b92dc6e3f849a12da003b639d1900ed 100644 (file)
@@ -37,6 +37,8 @@
 #include <linux/leds.h>
 #include <linux/atomic.h>
 #include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/serio.h>
 #include "../../misc/lis3lv02d/lis3lv02d.h"
 
 #define DRIVER_NAME     "hp_accel"
@@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
 
 /* HP-specific accelerometer driver ------------------------------------ */
 
+/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
+ * HPQ6000 sends through the keyboard bus */
+#define ACCEL_1 0x25
+#define ACCEL_2 0x26
+#define ACCEL_3 0x27
+#define ACCEL_4 0x28
+
 /* For automatic insertion of the module */
 static const struct acpi_device_id lis3lv02d_device_ids[] = {
        {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
@@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device)
                printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
 }
 
+static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
+                                 struct serio *port)
+{
+       static bool extended;
+
+       if (str & I8042_STR_AUXDATA)
+               return false;
+
+       if (data == 0xe0) {
+               extended = true;
+               return true;
+       } else if (unlikely(extended)) {
+               extended = false;
+
+               switch (data) {
+               case ACCEL_1:
+               case ACCEL_2:
+               case ACCEL_3:
+               case ACCEL_4:
+                       return true;
+               default:
+                       serio_interrupt(port, 0xe0, 0);
+                       return false;
+               }
+       }
+
+       return false;
+}
+
 static int lis3lv02d_add(struct acpi_device *device)
 {
        int ret;
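The filter above follows the i8042 filter contract: return true to swallow a byte before the keyboard driver sees it, false to pass it through. The static `extended` flag is a one-byte state machine pairing the 0xe0 prefix with the scancode that follows; when the pair turns out not to be accelerometer data, the already-swallowed prefix is re-injected via serio_interrupt(). A minimal sketch of the same contract (hypothetical scancode):

    static bool my_filter(unsigned char data, unsigned char str,
                          struct serio *port)
    {
            if (str & I8042_STR_AUXDATA)
                    return false;       /* AUX (mouse) bytes are never ours */
            return data == 0x25;        /* swallow one hypothetical scancode */
    }

    /* paired at add/remove time, as in the hunks below: */
    i8042_install_filter(my_filter);
    i8042_remove_filter(my_filter);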
@@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device)
        if (ret)
                return ret;
 
+       /* filter to remove HPQ6000 accelerometer data
+        * from keyboard bus stream */
+       if (strstr(dev_name(&device->dev), "HPQ6000"))
+               i8042_install_filter(hp_accel_i8042_filter);
+
        INIT_WORK(&hpled_led.work, delayed_set_status_worker);
        ret = led_classdev_register(NULL, &hpled_led.led_classdev);
        if (ret) {
@@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device)
        if (!device)
                return -EINVAL;
 
+       i8042_remove_filter(hp_accel_i8042_filter);
        lis3lv02d_joystick_disable(&lis3_dev);
        lis3lv02d_poweroff(&lis3_dev);
 
index 02152de135b5c43b79318c6c5ac7971f0723e478..ed494f37c40f5b95d201c94a0bf9119c5b4b031f 100644 (file)
@@ -837,6 +837,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
                },
        },
+       {
+               .ident = "Lenovo Yoga 3 Pro 1370",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"),
+               },
+       },
        {}
 };
 
index 5a59665122776d036cc43e885ec56845ea6b362b..ff765d8e1a09f648ad40770cdd59cf6aadaaf7e8 100644 (file)
@@ -1559,6 +1559,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
         .driver_data = &samsung_broken_acpi_video,
        },
+       {
+        .callback = samsung_dmi_matched,
+        .ident = "NC210",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+               DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+               },
+        .driver_data = &samsung_broken_acpi_video,
+       },
        {
         .callback = samsung_dmi_matched,
         .ident = "730U3E/740U3E",
index ef3a1904e92fe0827de83d79540bc1b569801554..ab6151f054204c46063ad824b14985c97e3b934c 100644 (file)
@@ -240,6 +240,12 @@ static const struct dmi_system_id toshiba_alt_keymap_dmi[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A50-A"),
+               },
+       },
        {}
 };
 
index 217da4b2ca8639ee5f2d5ced62615bfc558aed4f..99a78d365ceb11f2c3c6308296f62ce4db6c2a5d 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/time.h>
+#include <linux/time64.h>
 #include <linux/of.h>
 #include <linux/completion.h>
 #include <linux/mfd/core.h>
@@ -108,7 +109,7 @@ enum ab8500_fg_calibration_state {
 struct ab8500_fg_avg_cap {
        int avg;
        int samples[NBR_AVG_SAMPLES];
-       __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+       time64_t time_stamps[NBR_AVG_SAMPLES];
        int pos;
        int nbr_samples;
        int sum;
@@ -386,15 +387,15 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
  */
 static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
 {
-       struct timespec ts;
+       struct timespec64 ts64;
        struct ab8500_fg_avg_cap *avg = &di->avg_cap;
 
-       getnstimeofday(&ts);
+       getnstimeofday64(&ts64);
 
        do {
                avg->sum += sample - avg->samples[avg->pos];
                avg->samples[avg->pos] = sample;
-               avg->time_stamps[avg->pos] = ts.tv_sec;
+               avg->time_stamps[avg->pos] = ts64.tv_sec;
                avg->pos++;
 
                if (avg->pos == NBR_AVG_SAMPLES)
@@ -407,7 +408,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
                 * Check the time stamp for each sample. If too old,
                 * replace with latest sample
                 */
-       } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+       } while (ts64.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
 
        avg->avg = avg->sum / avg->nbr_samples;
 
@@ -446,14 +447,14 @@ static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
 static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
 {
        int i;
-       struct timespec ts;
+       struct timespec64 ts64;
        struct ab8500_fg_avg_cap *avg = &di->avg_cap;
 
-       getnstimeofday(&ts);
+       getnstimeofday64(&ts64);
 
        for (i = 0; i < NBR_AVG_SAMPLES; i++) {
                avg->samples[i] = sample;
-               avg->time_stamps[i] = ts.tv_sec;
+               avg->time_stamps[i] = ts64.tv_sec;
        }
 
        avg->pos = 0;
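The ab8500_fg changes are the standard y2038 conversion: __kernel_time_t (a long, hence 32-bit on 32-bit machines) becomes time64_t, and getnstimeofday() becomes getnstimeofday64(). A minimal sketch of the sampling step:

    static time64_t sample_seconds(void)
    {
            struct timespec64 ts64;

            getnstimeofday64(&ts64);
            return ts64.tv_sec;         /* 64-bit seconds, safe past 2038 */
    }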
index e384844a1ae1957ffd3687e7f54d0ff3eba2b59f..1f49986fc6057c8c4db858735ec8d9fd38326f8a 100644 (file)
@@ -1579,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client,
        if (np) {
                bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection");
 
-               if (!bq->notify_psy)
-                       return -EPROBE_DEFER;
+               if (IS_ERR(bq->notify_psy)) {
+                       dev_info(&client->dev,
+                               "no 'ti,usb-charger-detection' property (err=%ld)\n",
+                               PTR_ERR(bq->notify_psy));
+                       bq->notify_psy = NULL;
+               } else if (!bq->notify_psy) {
+                       ret = -EPROBE_DEFER;
+                       goto error_2;
+               }
        }
        else if (pdata->notify_device)
                bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
@@ -1602,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client,
                ret = of_property_read_u32(np, "ti,current-limit",
                                &bq->init_data.current_limit);
                if (ret)
-                       return ret;
+                       goto error_2;
                ret = of_property_read_u32(np, "ti,weak-battery-voltage",
                                &bq->init_data.weak_battery_voltage);
                if (ret)
-                       return ret;
+                       goto error_2;
                ret = of_property_read_u32(np, "ti,battery-regulation-voltage",
                                &bq->init_data.battery_regulation_voltage);
                if (ret)
-                       return ret;
+                       goto error_2;
                ret = of_property_read_u32(np, "ti,charge-current",
                                &bq->init_data.charge_current);
                if (ret)
-                       return ret;
+                       goto error_2;
                ret = of_property_read_u32(np, "ti,termination-current",
                                &bq->init_data.termination_current);
                if (ret)
-                       return ret;
+                       goto error_2;
                ret = of_property_read_u32(np, "ti,resistor-sense",
                                &bq->init_data.resistor_sense);
                if (ret)
-                       return ret;
+                       goto error_2;
        } else {
                memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
        }
index 7098a1ce2d3c1068e2ef6cc5fbbe4d185ece9850..ef8094a61f1e91caa16fdcb204ce4ced9485334e 100644 (file)
@@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
 static bool is_batt_present(struct charger_manager *cm)
 {
        union power_supply_propval val;
+       struct power_supply *psy;
        bool present = false;
        int i, ret;
 
@@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm)
        case CM_NO_BATTERY:
                break;
        case CM_FUEL_GAUGE:
-               ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+               psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+               if (!psy)
+                       break;
+
+               ret = psy->get_property(psy,
                                POWER_SUPPLY_PROP_PRESENT, &val);
                if (ret == 0 && val.intval)
                        present = true;
                break;
        case CM_CHARGER_STAT:
-               for (i = 0; cm->charger_stat[i]; i++) {
-                       ret = cm->charger_stat[i]->get_property(
-                                       cm->charger_stat[i],
-                                       POWER_SUPPLY_PROP_PRESENT, &val);
+               for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+                       psy = power_supply_get_by_name(
+                                       cm->desc->psy_charger_stat[i]);
+                       if (!psy) {
+                               dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+                                       cm->desc->psy_charger_stat[i]);
+                               continue;
+                       }
+
+                       ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT,
+                                       &val);
                        if (ret == 0 && val.intval) {
                                present = true;
                                break;
@@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm)
 static bool is_ext_pwr_online(struct charger_manager *cm)
 {
        union power_supply_propval val;
+       struct power_supply *psy;
        bool online = false;
        int i, ret;
 
        /* If at least one of them has one, it's yes. */
-       for (i = 0; cm->charger_stat[i]; i++) {
-               ret = cm->charger_stat[i]->get_property(
-                               cm->charger_stat[i],
-                               POWER_SUPPLY_PROP_ONLINE, &val);
+       for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+               psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
+               if (!psy) {
+                       dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+                                       cm->desc->psy_charger_stat[i]);
+                       continue;
+               }
+
+               ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
                if (ret == 0 && val.intval) {
                        online = true;
                        break;
@@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm)
 static int get_batt_uV(struct charger_manager *cm, int *uV)
 {
        union power_supply_propval val;
+       struct power_supply *fuel_gauge;
        int ret;
 
-       if (!cm->fuel_gauge)
+       fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+       if (!fuel_gauge)
                return -ENODEV;
 
-       ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+       ret = fuel_gauge->get_property(fuel_gauge,
                                POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
        if (ret)
                return ret;
@@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm)
 {
        int i, ret;
        bool charging = false;
+       struct power_supply *psy;
        union power_supply_propval val;
 
        /* If there is no battery, it cannot be charged */
@@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm)
                return false;
 
        /* If at least one of the charger is charging, return yes */
-       for (i = 0; cm->charger_stat[i]; i++) {
+       for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
                /* 1. The charger should not be DISABLED */
                if (cm->emergency_stop)
                        continue;
                if (!cm->charger_enabled)
                        continue;
 
+               psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
+               if (!psy) {
+                       dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+                                       cm->desc->psy_charger_stat[i]);
+                       continue;
+               }
+
                /* 2. The charger should be online (ext-power) */
-               ret = cm->charger_stat[i]->get_property(
-                               cm->charger_stat[i],
-                               POWER_SUPPLY_PROP_ONLINE, &val);
+               ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
                if (ret) {
                        dev_warn(cm->dev, "Cannot read ONLINE value from %s\n",
                                 cm->desc->psy_charger_stat[i]);
@@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm)
                 * 3. The charger should not be FULL, DISCHARGING,
                 * or NOT_CHARGING.
                 */
-               ret = cm->charger_stat[i]->get_property(
-                               cm->charger_stat[i],
-                               POWER_SUPPLY_PROP_STATUS, &val);
+               ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
                if (ret) {
                        dev_warn(cm->dev, "Cannot read STATUS value from %s\n",
                                 cm->desc->psy_charger_stat[i]);
@@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm)
 {
        struct charger_desc *desc = cm->desc;
        union power_supply_propval val;
+       struct power_supply *fuel_gauge;
        int ret = 0;
        int uV;
 
@@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm)
        if (!is_batt_present(cm))
                return false;
 
-       if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
+       fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+       if (!fuel_gauge)
+               return false;
+
+       if (desc->fullbatt_full_capacity > 0) {
                val.intval = 0;
 
                /* Not full if capacity of fuel gauge isn't full */
-               ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+               ret = fuel_gauge->get_property(fuel_gauge,
                                POWER_SUPPLY_PROP_CHARGE_FULL, &val);
                if (!ret && val.intval > desc->fullbatt_full_capacity)
                        return true;
@@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm)
        }
 
        /* Full, if the capacity is more than fullbatt_soc */
-       if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
+       if (desc->fullbatt_soc > 0) {
                val.intval = 0;
 
-               ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+               ret = fuel_gauge->get_property(fuel_gauge,
                                POWER_SUPPLY_PROP_CAPACITY, &val);
                if (!ret && val.intval >= desc->fullbatt_soc)
                        return true;
@@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm)
        return ret;
 }
 
+static int cm_get_battery_temperature_by_psy(struct charger_manager *cm,
+                                       int *temp)
+{
+       struct power_supply *fuel_gauge;
+
+       fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+       if (!fuel_gauge)
+               return -ENODEV;
+
+       return fuel_gauge->get_property(fuel_gauge,
+                               POWER_SUPPLY_PROP_TEMP,
+                               (union power_supply_propval *)temp);
+}
+
 static int cm_get_battery_temperature(struct charger_manager *cm,
                                        int *temp)
 {
@@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
                return -ENODEV;
 
 #ifdef CONFIG_THERMAL
-       ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
-       if (!ret)
-               /* Calibrate temperature unit */
-               *temp /= 100;
-#else
-       ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
-                               POWER_SUPPLY_PROP_TEMP,
-                               (union power_supply_propval *)temp);
+       if (cm->tzd_batt) {
+               ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
+               if (!ret)
+                       /* Calibrate temperature unit */
+                       *temp /= 100;
+       } else
 #endif
+       {
+               /* if-else continued from CONFIG_THERMAL */
+               ret = cm_get_battery_temperature_by_psy(cm, temp);
+       }
+
        return ret;
 }
 
@@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy,
        struct charger_manager *cm = container_of(psy,
                        struct charger_manager, charger_psy);
        struct charger_desc *desc = cm->desc;
+       struct power_supply *fuel_gauge;
        int ret = 0;
        int uV;
 
@@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy,
                ret = get_batt_uV(cm, &val->intval);
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+               fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+               if (!fuel_gauge) {
+                       ret = -ENODEV;
+                       break;
+               }
+               ret = fuel_gauge->get_property(fuel_gauge,
                                POWER_SUPPLY_PROP_CURRENT_NOW, val);
                break;
        case POWER_SUPPLY_PROP_TEMP:
        case POWER_SUPPLY_PROP_TEMP_AMBIENT:
                return cm_get_battery_temperature(cm, &val->intval);
        case POWER_SUPPLY_PROP_CAPACITY:
-               if (!cm->fuel_gauge) {
+               fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+               if (!fuel_gauge) {
                        ret = -ENODEV;
                        break;
                }
@@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy,
                        break;
                }
 
-               ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+               ret = fuel_gauge->get_property(fuel_gauge,
                                        POWER_SUPPLY_PROP_CAPACITY, val);
                if (ret)
                        break;
@@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy,
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
                if (is_charging(cm)) {
-                       ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+                       fuel_gauge = power_supply_get_by_name(
+                                       cm->desc->psy_fuel_gauge);
+                       if (!fuel_gauge) {
+                               ret = -ENODEV;
+                               break;
+                       }
+
+                       ret = fuel_gauge->get_property(fuel_gauge,
                                                POWER_SUPPLY_PROP_CHARGE_NOW,
                                                val);
                        if (ret) {
@@ -970,6 +1030,7 @@ static struct power_supply psy_default = {
        .properties = default_charger_props,
        .num_properties = ARRAY_SIZE(default_charger_props),
        .get_property = charger_get_property,
+       .no_thermal = true,
 };
 
 /**
@@ -1485,14 +1546,15 @@ err:
        return ret;
 }
 
-static int cm_init_thermal_data(struct charger_manager *cm)
+static int cm_init_thermal_data(struct charger_manager *cm,
+               struct power_supply *fuel_gauge)
 {
        struct charger_desc *desc = cm->desc;
        union power_supply_propval val;
        int ret;
 
        /* Verify whether fuel gauge provides battery temperature */
-       ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+       ret = fuel_gauge->get_property(fuel_gauge,
                                        POWER_SUPPLY_PROP_TEMP, &val);
 
        if (!ret) {
@@ -1502,8 +1564,6 @@ static int cm_init_thermal_data(struct charger_manager *cm)
                cm->desc->measure_battery_temp = true;
        }
 #ifdef CONFIG_THERMAL
-       cm->tzd_batt = cm->fuel_gauge->tzd;
-
        if (ret && desc->thermal_zone) {
                cm->tzd_batt =
                        thermal_zone_get_zone_by_name(desc->thermal_zone);
@@ -1666,6 +1726,7 @@ static int charger_manager_probe(struct platform_device *pdev)
        int ret = 0, i = 0;
        int j = 0;
        union power_supply_propval val;
+       struct power_supply *fuel_gauge;
 
        if (g_desc && !rtc_dev && g_desc->rtc_name) {
                rtc_dev = rtc_class_open(g_desc->rtc_name);
@@ -1729,23 +1790,20 @@ static int charger_manager_probe(struct platform_device *pdev)
        while (desc->psy_charger_stat[i])
                i++;
 
-       cm->charger_stat = devm_kzalloc(&pdev->dev,
-                               sizeof(struct power_supply *) * i, GFP_KERNEL);
-       if (!cm->charger_stat)
-               return -ENOMEM;
-
+       /* Check if charger's supplies are present at probe */
        for (i = 0; desc->psy_charger_stat[i]; i++) {
-               cm->charger_stat[i] = power_supply_get_by_name(
-                                       desc->psy_charger_stat[i]);
-               if (!cm->charger_stat[i]) {
+               struct power_supply *psy;
+
+               psy = power_supply_get_by_name(desc->psy_charger_stat[i]);
+               if (!psy) {
                        dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
                                desc->psy_charger_stat[i]);
                        return -ENODEV;
                }
        }
 
-       cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
-       if (!cm->fuel_gauge) {
+       fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
+       if (!fuel_gauge) {
                dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
                        desc->psy_fuel_gauge);
                return -ENODEV;
@@ -1788,13 +1846,13 @@ static int charger_manager_probe(struct platform_device *pdev)
        cm->charger_psy.num_properties = psy_default.num_properties;
 
        /* Find which optional psy-properties are available */
-       if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+       if (!fuel_gauge->get_property(fuel_gauge,
                                          POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
                cm->charger_psy.properties[cm->charger_psy.num_properties] =
                                POWER_SUPPLY_PROP_CHARGE_NOW;
                cm->charger_psy.num_properties++;
        }
-       if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+       if (!fuel_gauge->get_property(fuel_gauge,
                                          POWER_SUPPLY_PROP_CURRENT_NOW,
                                          &val)) {
                cm->charger_psy.properties[cm->charger_psy.num_properties] =
@@ -1802,7 +1860,7 @@ static int charger_manager_probe(struct platform_device *pdev)
                cm->charger_psy.num_properties++;
        }
 
-       ret = cm_init_thermal_data(cm);
+       ret = cm_init_thermal_data(cm, fuel_gauge);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize thermal data\n");
                cm->desc->measure_battery_temp = false;
@@ -2066,8 +2124,8 @@ static bool find_power_supply(struct charger_manager *cm,
        int i;
        bool found = false;
 
-       for (i = 0; cm->charger_stat[i]; i++) {
-               if (psy == cm->charger_stat[i]) {
+       for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+               if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) {
                        found = true;
                        break;
                }
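All of the charger-manager hunks make one change: instead of caching struct power_supply pointers at probe time, each use resolves the supply by name on demand, and find_power_supply() now compares names rather than pointers, so the driver tolerates supplies that register or unregister after it probes. The lookup idiom used throughout:

    struct power_supply *psy = power_supply_get_by_name(name);

    if (!psy)
            return -ENODEV;     /* not (or no longer) registered */
    ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);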
index 6cb7fe5c022d48ca237e31eb89e78f8f70665776..694e8cddd5c13e1760e7c9cd5fc0170876a9df04 100644 (file)
@@ -417,6 +417,9 @@ static int psy_register_thermal(struct power_supply *psy)
 {
        int i;
 
+       if (psy->no_thermal)
+               return 0;
+
        /* Register battery zone device psy reports temperature */
        for (i = 0; i < psy->num_properties; i++) {
                if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
index 86db310d5304ad00dadcd605c6f9a9deb63a652c..d2a8c64cae42a9bc83890e922b7971781347ee62 100644 (file)
@@ -163,7 +163,7 @@ static int of_get_max1586_platform_data(struct device *dev,
                                 struct max1586_platform_data *pdata)
 {
        struct max1586_subdev_data *sub;
-       struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)];
+       struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)] = { };
        struct device_node *np = dev->of_node;
        int i, matched;
 
index ef1af2debbd293af555cbcde4ed5c72d866ad47a..f69320e1738f7474b517c9a2b0e030065c4f95d0 100644 (file)
@@ -395,7 +395,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
        struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np;
        struct max77686_regulator_data *rdata;
-       struct of_regulator_match rmatch;
+       struct of_regulator_match rmatch = { };
        unsigned int i;
 
        pmic_np = iodev->dev->of_node;
index c67ff05fc1dd1354cd0a72601f31b407f99638ad..d158f71fa12817ef8f34c40b1955a910f115375f 100644 (file)
@@ -227,7 +227,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
        struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct max77693_regulator_data *rdata = NULL;
        int num_rdata, i;
-       struct regulator_config config;
+       struct regulator_config config = { };
 
        num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
        if (!rdata || num_rdata <= 0) {
index d89792b084e937a9fd29bbc28defe65ab6c07959..45fa240fe243e17c72685a94bb36c6a9c8e1ffbd 100644 (file)
@@ -454,7 +454,7 @@ static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
        struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np;
        struct max77686_regulator_data *rdata;
-       struct of_regulator_match rmatch;
+       struct of_regulator_match rmatch = { };
        unsigned int i;
 
        pmic_np = iodev->dev->of_node;
index 2fc4111887949e01e04b023d6065962853779d42..7eee2ca1854183b05e4d4b08180020afc7337033 100644 (file)
@@ -335,7 +335,7 @@ static int max8660_pdata_from_dt(struct device *dev,
        int matched, i;
        struct device_node *np;
        struct max8660_subdev_data *sub;
-       struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
+       struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)] = { };
 
        np = of_get_child_by_name(dev->of_node, "regulators");
        if (!np) {
index 7a51814abdc56b5dba53dfe8d9ea95108899d423..5a1d4afa4776231e457f5606e0a53c514477b510 100644 (file)
@@ -211,7 +211,8 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                search = dev->of_node;
 
        if (!search) {
-               dev_err(dev, "Failed to find regulator container node\n");
+               dev_dbg(dev, "Failed to find regulator container node '%s'\n",
+                       desc->regulators_node);
                return NULL;
        }
 
index 4acefa6b462e0d8e362866b1b86bc2ca4177bb00..7633b9bfbe6e971dc63447aa7caa334407819aae 100644 (file)
@@ -341,7 +341,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
 {
        struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
-       struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
+       struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX] = { };
        struct device_node *reg_np = NULL;
        struct regulator_config config = { };
        struct s2mpa01_info *s2mpa01;
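This and the preceding regulator hunks apply the same one-line fix: the of_regulator_match arrays and regulator_config structures live on the stack, and the matching code only writes some fields, so without the `= { }` initializer leftover stack contents can be misread as valid match data (e.g. a garbage init_data pointer). A hypothetical sketch of the failure mode being closed:

    struct of_regulator_match rmatch[4];            /* fields hold stack garbage */
    struct of_regulator_match rmatch_ok[4] = { };   /* every field starts zeroed */

    /* only the zeroed array makes "unmatched" reliably detectable: */
    if (!rmatch_ok[2].init_data)
            pr_debug("regulator 2 not described in DT\n");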
index 6cbe6ef3c889d14841e2c73aa848fa9f563b68ec..bda52f18e9670aefe1a7757209fdf038c866ad5a 100644 (file)
@@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;
-       struct virtio_driver *drv;
 
        if (!vcdev)
                return;
index ca75c7ca25590cdb15ae562bd9bd87cd28fcce9b..ef355c13ccc47c9a9f05cde438408e4e56b3eb3d 100644 (file)
@@ -480,9 +480,7 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
                        bnx2fc_initiate_cleanup(orig_io_req);
                        /* Post a new IO req with the same sc_cmd */
                        BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
-                       spin_unlock_bh(&tgt->tgt_lock);
                        rc = bnx2fc_post_io_req(tgt, new_io_req);
-                       spin_lock_bh(&tgt->tgt_lock);
                        if (!rc)
                                goto free_frame;
                        BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
index 79e5c94107a9cc44fe8269f55ab72e8150005e0b..72533c58c1f3bc0d6a18412a197651399abbf6a2 100644 (file)
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        struct fc_frame_header *fh;
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
+       struct sk_buff *tmp_skb;
        unsigned short oxid;
 
        interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
                goto err;
        }
 
+       tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!tmp_skb)
+               goto err;
+
+       skb = tmp_skb;
+
        if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
                printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
                goto err;
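skb_share_check() is the standard guard before modifying a received skb: if the buffer is shared it returns a private clone and drops the caller's reference to the original itself (on clone failure it releases the skb and returns NULL), so the header accesses further down cannot disturb another holder's view of the same buffer. The pattern, in sketch form:

    skb = skb_share_check(skb, GFP_ATOMIC);
    if (!skb)
            goto err;   /* clone failed; the helper already dropped our reference */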
index 0679782d9d15e05b5d536c95b25dd643249f142e..5b99844ef6bf73209518c8e586825bbc1486866e 100644 (file)
@@ -1894,18 +1894,24 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
                        goto exit_qcmd;
                }
        }
+
+       spin_lock_bh(&tgt->tgt_lock);
+
        io_req = bnx2fc_cmd_alloc(tgt);
        if (!io_req) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
-               goto exit_qcmd;
+               goto exit_qcmd_tgtlock;
        }
        io_req->sc_cmd = sc_cmd;
 
        if (bnx2fc_post_io_req(tgt, io_req)) {
                printk(KERN_ERR PFX "Unable to post io_req\n");
                rc = SCSI_MLQUEUE_HOST_BUSY;
-               goto exit_qcmd;
+               goto exit_qcmd_tgtlock;
        }
+
+exit_qcmd_tgtlock:
+       spin_unlock_bh(&tgt->tgt_lock);
 exit_qcmd:
        return rc;
 }
@@ -2020,6 +2026,8 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
        int task_idx, index;
        u16 xid;
 
+       /* bnx2fc_post_io_req() is called with the tgt_lock held */
+
        /* Initialize rest of io_req fields */
        io_req->cmd_type = BNX2FC_SCSI_CMD;
        io_req->port = port;
@@ -2047,9 +2055,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
        /* Build buffer descriptor list for firmware from sg list */
        if (bnx2fc_build_bd_list_from_sg(io_req)) {
                printk(KERN_ERR PFX "BD list creation failed\n");
-               spin_lock_bh(&tgt->tgt_lock);
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
                return -EAGAIN;
        }
 
@@ -2061,19 +2067,15 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
        task = &(task_page[index]);
        bnx2fc_init_task(io_req, task);
 
-       spin_lock_bh(&tgt->tgt_lock);
-
        if (tgt->flush_in_prog) {
                printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
                return -EAGAIN;
        }
 
        if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
                printk(KERN_ERR PFX "Session not ready...post_io\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
                return -EAGAIN;
        }
 
@@ -2091,6 +2093,5 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 
        /* Ring doorbell */
        bnx2fc_ring_doorbell(tgt);
-       spin_unlock_bh(&tgt->tgt_lock);
        return 0;
 }
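
Taken together, these hunks invert the locking convention: instead of bnx2fc_post_io_req() taking and dropping tgt_lock internally (with unlocked gaps between the BD-list build, the flush check, and the doorbell ring), the caller now holds the lock across allocation and posting. A condensed sketch of the new shape, assuming the surrounding kernel context:

    spin_lock_bh(&tgt->tgt_lock);
    io_req = bnx2fc_cmd_alloc(tgt);          /* alloc under the lock */
    if (!io_req) {
            rc = SCSI_MLQUEUE_HOST_BUSY;
            goto unlock;
    }
    io_req->sc_cmd = sc_cmd;
    if (bnx2fc_post_io_req(tgt, io_req))     /* runs with lock held */
            rc = SCSI_MLQUEUE_HOST_BUSY;
    unlock:
    spin_unlock_bh(&tgt->tgt_lock);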
index 3e0a0d315f72acc6b7088ded6d27465ff14b2840..15081257cfc881d71e0638159dea1e58006f2769 100644 (file)
@@ -828,6 +828,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;
 
+       module_put(THIS_MODULE);
+
        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
@@ -936,20 +938,23 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
 
-       if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
-               cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
-               cxgbi_sock_set_state(csk, CTP_ABORTING);
-               goto done;
+       cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+
+       if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+               send_tx_flowc_wr(csk);
+               cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }
 
-       cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+       cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
+       cxgbi_sock_set_state(csk, CTP_ABORTING);
+
        send_abort_rpl(csk, rst_status);
 
        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }
-done:
+
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
 rel_skb:
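
Two fixes here: do_act_open_rpl() now releases a module reference, presumably one taken on the connect path before the active open was issued, so a failed open no longer pins the module; and the abort-request handler emits a TX flowc work request before replying whenever no TX data was ever sent on the tid, then marks the socket aborting unconditionally instead of deferring on a first pass. A hypothetical sketch of the refcount pairing (the get side is outside this diff):

    try_module_get(THIS_MODULE);   /* when the active open is queued */
    /* ... */
    module_put(THIS_MODULE);       /* when CPL_ACT_OPEN_RPL arrives */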
index 674d498b46ab0ba380655fa4e9649b3fabf5e49d..7da59c38a69ec97cf15507876122151222e52fe1 100644 (file)
@@ -816,7 +816,7 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
                read_lock_bh(&csk->callback_lock);
                if (csk->user_data)
                        iscsi_conn_failure(csk->user_data,
-                                       ISCSI_ERR_CONN_FAILED);
+                                       ISCSI_ERR_TCP_CONN_CLOSE);
                read_unlock_bh(&csk->callback_lock);
        }
 }
@@ -905,18 +905,16 @@ void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
 {
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
+
+       cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
        if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
-               if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
-                       cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
-               else {
-                       cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
-                       cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
-                       if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
-                               pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
-                                       csk, csk->state, csk->flags, csk->tid);
-                       cxgbi_sock_closed(csk);
-               }
+               cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
+               if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
+                       pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
+                              csk, csk->state, csk->flags, csk->tid);
+               cxgbi_sock_closed(csk);
        }
+
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
 }
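
The reply handler collapses the removed two-pass dance (a first RPL only set ABORT_RPL_RCVD, a second closed the socket) into a single pass: the flag is set up front, and if an abort was pending the socket is torn down immediately. In shorthand:

    /* local ABORT_REQ sent   -> CTPF_ABORT_RPL_PENDING set
     * ABORT_RPL_RSS received -> set CTPF_ABORT_RPL_RCVD;
     *                           if pending: clear it, log if the peer
     *                           also aborted, cxgbi_sock_closed()
     */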
index e99507ed0e3c9ab5ad36cf19f26814d61a78ec76..fd78bdc535280a0eebcefc527221ebf091300e8f 100644 (file)
@@ -474,6 +474,13 @@ static int alua_check_sense(struct scsi_device *sdev,
                         * LUN Not Ready -- Offline
                         */
                        return SUCCESS;
+               if (sdev->allow_restart &&
+                   sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
+                       /*
+                        * if the device is not started, we need to wake
+                        * the error handler to start the motor
+                        */
+                       return FAILED;
                break;
        case UNIT_ATTENTION:
                if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
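
The added branch matches the NOT READY sense combination asc/ascq 0x04/0x02, "Logical Unit Not Ready, Initializing Command Required" (per SPC). Returning FAILED rather than passing the sense through hands the command to the SCSI error handler, which can issue START STOP UNIT to spin the device up; that only makes sense when the device advertises allow_restart:

    /* decoded from the sense buffer (SPC):
     *   sense key 0x2 NOT READY
     *   asc 0x04 / ascq 0x02  "initializing command required"
     * FAILED -> wake the EH so it can start the unit's motor
     */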
index f6a69a3b1b3f0c8fb1bd805e8970beb2800feaef..5640ad1c8214eb9d12cd4f1495d20ad5c1cc1984 100644 (file)
@@ -4453,7 +4453,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
                        instance->msixentry[i].entry = i;
                i = pci_enable_msix_range(instance->pdev, instance->msixentry,
                                          1, instance->msix_vectors);
-               if (i)
+               if (i > 0)
                        instance->msix_vectors = i;
                else
                        instance->msix_vectors = 0;
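
pci_enable_msix_range() returns the number of vectors granted (at least minvec) on success or a negative errno, never 0, so the old "if (i)" test would have stored an error code as a vector count. The corrected pattern:

    ret = pci_enable_msix_range(instance->pdev, instance->msixentry,
                                1, instance->msix_vectors);
    if (ret > 0)
            instance->msix_vectors = ret;  /* vectors actually granted */
    else
            instance->msix_vectors = 0;    /* fall back to legacy INTx */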
index 49014a143c6a9ab56ec81a56d3c7180156341d95..c1d04d4d3c6c140457c19e50865b29bd3287d54f 100644 (file)
@@ -202,6 +202,7 @@ static struct {
        {"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
        {"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
        {"INSITE", "I325VM", NULL, BLIST_KEY},
+       {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
        {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
        {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
        {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
index 9a6f8468225f372bae07f36d4779f8c19f27c6e3..bc5ff6ff9c796bfa9101ba7d73b4e834af9d0b33 100644 (file)
@@ -459,14 +459,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
        if (! scsi_command_normalize_sense(scmd, &sshdr))
                return FAILED;  /* no valid sense data */
 
-       if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
-               /*
-                * nasty: for mid-layer issued TURs, we need to return the
-                * actual sense data without any recovery attempt.  For eh
-                * issued ones, we need to try to recover and interpret
-                */
-               return SUCCESS;
-
        scsi_report_sense(sdev, &sshdr);
 
        if (scsi_sense_is_deferred(&sshdr))
@@ -482,6 +474,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
                /* handler does not care. Drop down to default handling */
        }
 
+       if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+               /*
+                * nasty: for mid-layer issued TURs, we need to return the
+                * actual sense data without any recovery attempt.  For eh
+                * issued ones, we need to try to recover and interpret
+                */
+               return SUCCESS;
+
        /*
         * Previous logic looked for FILEMARK, EOM or ILI which are
         * mainly associated with tapes and returned SUCCESS.
@@ -2001,8 +2001,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
         * is no point trying to lock the door of an off-line device.
         */
        shost_for_each_device(sdev, shost) {
-               if (scsi_device_online(sdev) && sdev->locked)
+               if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
                        scsi_eh_lock_door(sdev);
+                       sdev->was_reset = 0;
+               }
        }
 
        /*
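
Two independent error-handler fixes: scsi_report_sense() is hoisted above the TEST UNIT READY shortcut, so mid-layer TURs still surface their sense data (for instance as unit-attention events) before it is returned unprocessed; and door re-locking after EH is now limited to devices whose was_reset flag is set, with the flag consumed so a later EH pass does not re-lock doors the user has since unlocked:

    shost_for_each_device(sdev, shost) {
            if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
                    scsi_eh_lock_door(sdev);
                    sdev->was_reset = 0;   /* one re-lock per reset */
            }
    }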
index 8adf067ff019344eaf0c42b97007b4abac65e79e..1c3467b8256612b96bafaee3827312e21711de28 100644 (file)
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
                        GFP_KERNEL);
        if (!clkfreq) {
-               dev_err(dev, "%s: no memory\n", "freq-table-hz");
                ret = -ENOMEM;
                goto out;
        }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        if (ret && (ret != -EINVAL)) {
                dev_err(dev, "%s: error reading array %d\n",
                                "freq-table-hz", ret);
-               goto free_clkfreq;
+               return ret;
        }
 
        for (i = 0; i < sz; i += 2) {
                ret = of_property_read_string_index(np,
                                "clock-names", i/2, (const char **)&name);
                if (ret)
-                       goto free_clkfreq;
+                       goto out;
 
                clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
                if (!clki) {
                        ret = -ENOMEM;
-                       goto free_clkfreq;
+                       goto out;
                }
 
                clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
                                clki->min_freq, clki->max_freq, clki->name);
                list_add_tail(&clki->list, &hba->clk_list_head);
        }
-free_clkfreq:
-       kfree(clkfreq);
 out:
        return ret;
 }
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
        }
 
        vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
-       if (!vreg) {
-               dev_err(dev, "No memory for %s regulator\n", name);
-               goto out;
-       }
+       if (!vreg)
+               return -ENOMEM;
 
        vreg->name = kstrdup(name, GFP_KERNEL);
 
index 497c38a4a86615178e367e40666937e2d969b41f..605ca60e8a10da25bed98f9d2ac6fdb42b13176f 100644 (file)
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
        device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+       cancel_work_sync(&hba->clk_gating.ungate_work);
+       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
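
Without flushing the gating work items, ufshcd_exit_clk_gating() could return while gate_work or ungate_work was still queued or running, leaving it to execute against torn-down hba state. The _sync cancel variants wait for any in-flight instance and prevent requeueing, a standard teardown pattern:

    device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
    cancel_work_sync(&hba->clk_gating.ungate_work);         /* waits */
    cancel_delayed_work_sync(&hba->clk_gating.gate_work);   /* waits */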
 
 /* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
        return ret;
 }
 
+ /**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+       hba->pwr_info.gear_rx = UFS_PWM_G1;
+       hba->pwr_info.gear_tx = UFS_PWM_G1;
+       hba->pwr_info.lane_rx = 1;
+       hba->pwr_info.lane_tx = 1;
+       hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+       hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+       hba->pwr_info.hs_rate = 0;
+}
+
 /**
  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  * @hba: per-adapter instance
@@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
        hba = shost_priv(sdev->host);
        scsi_deactivate_tcq(sdev, hba->nutrs);
        /* Drop the reference as it won't be needed anymore */
-       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
                hba->sdev_ufs_device = NULL;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 }
 
 /**
@@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 {
        int ret = 0;
+       struct scsi_device *sdev_rpmb;
+       struct scsi_device *sdev_boot;
 
        hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,56 +4095,33 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
                hba->sdev_ufs_device = NULL;
                goto out;
        }
+       scsi_device_put(hba->sdev_ufs_device);
 
-       hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+       sdev_boot = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
-       if (IS_ERR(hba->sdev_boot)) {
-               ret = PTR_ERR(hba->sdev_boot);
-               hba->sdev_boot = NULL;
+       if (IS_ERR(sdev_boot)) {
+               ret = PTR_ERR(sdev_boot);
                goto remove_sdev_ufs_device;
        }
+       scsi_device_put(sdev_boot);
 
-       hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+       sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
-       if (IS_ERR(hba->sdev_rpmb)) {
-               ret = PTR_ERR(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
+       if (IS_ERR(sdev_rpmb)) {
+               ret = PTR_ERR(sdev_rpmb);
                goto remove_sdev_boot;
        }
+       scsi_device_put(sdev_rpmb);
        goto out;
 
 remove_sdev_boot:
-       scsi_remove_device(hba->sdev_boot);
+       scsi_remove_device(sdev_boot);
 remove_sdev_ufs_device:
        scsi_remove_device(hba->sdev_ufs_device);
 out:
        return ret;
 }
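
Lifetime sketch for this rework: __scsi_add_device() returns with a reference held by the caller. Dropping it right away with scsi_device_put() leaves the W-LUs owned by the SCSI midlayer, so scsi_remove_host() in ufshcd_remove() reaps them and the bespoke ufshcd_scsi_remove_wlus() helper (deleted just below) is no longer needed:

    sdev = __scsi_add_device(hba->host, 0, 0, lun, NULL);
    if (IS_ERR(sdev))
            return PTR_ERR(sdev);
    scsi_device_put(sdev);   /* midlayer keeps its own reference */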
 
-/**
- * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
- *                          ufshcd_scsi_add_wlus()
- * @hba: per-adapter instance
- *
- */
-static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
-{
-       if (hba->sdev_ufs_device) {
-               scsi_remove_device(hba->sdev_ufs_device);
-               hba->sdev_ufs_device = NULL;
-       }
-
-       if (hba->sdev_boot) {
-               scsi_remove_device(hba->sdev_boot);
-               hba->sdev_boot = NULL;
-       }
-
-       if (hba->sdev_rpmb) {
-               scsi_remove_device(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
-       }
-}
-
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_init_pwr_info(hba);
+
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
 
@@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
@@ -4471,7 +4481,7 @@ out:
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
-       } else if (!ret && on) {
+       } else if (on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 {
        unsigned char cmd[6] = { START_STOP };
        struct scsi_sense_hdr sshdr;
-       struct scsi_device *sdp = hba->sdev_ufs_device;
+       struct scsi_device *sdp;
+       unsigned long flags;
        int ret;
 
-       if (!sdp || !scsi_device_online(sdp))
-               return -ENODEV;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       sdp = hba->sdev_ufs_device;
+       if (sdp) {
+               ret = scsi_device_get(sdp);
+               if (!ret && !scsi_device_online(sdp)) {
+                       ret = -ENODEV;
+                       scsi_device_put(sdp);
+               }
+       } else {
+               ret = -ENODEV;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (ret)
+               return ret;
 
        /*
         * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;
 out:
+       scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
 }
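
This closes a use-after-free window together with the slave_destroy hunk above: sdev_ufs_device is only ever cleared under host_lock, so the suspend path samples the pointer and pins it with scsi_device_get() inside the same lock before issuing START STOP UNIT, dropping the reference at out:. Condensed (the online check from the hunk is elided):

    spin_lock_irqsave(hba->host->host_lock, flags);
    sdp = hba->sdev_ufs_device;
    ret = sdp ? scsi_device_get(sdp) : -ENODEV;   /* pin before use */
    spin_unlock_irqrestore(hba->host->host_lock, flags);
    if (ret)
            return ret;
    /* ... issue the command ... */
    scsi_device_put(sdp);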
@@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
        int ret = 0;
 
        if (!hba || !hba->is_powered)
-               goto out;
+               return 0;
 
        if (pm_runtime_suspended(hba->dev)) {
                if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
 void ufshcd_remove(struct ufs_hba *hba)
 {
        scsi_remove_host(hba->host);
-       ufshcd_scsi_remove_wlus(hba);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);
index 58ecdff5065c27d2dc3b6c563f641675799435dc..4a574aa458557a14ecc15b0d96d69dc8fa1fc9d8 100644 (file)
@@ -392,8 +392,6 @@ struct ufs_hba {
         * "UFS device" W-LU.
         */
        struct scsi_device *sdev_ufs_device;
-       struct scsi_device *sdev_rpmb;
-       struct scsi_device *sdev_boot;
 
        enum ufs_dev_pwr_mode curr_dev_pwr_mode;
        enum uic_link_state uic_link_state;
index 11a5043959dc9c10126f2a0708a1677092a7ddf8..011a3363c265322631a4021b15c05938ce51c62b 100644 (file)
@@ -31,6 +31,7 @@
 static u32 (*fuse_readl)(const unsigned int offset);
 static int fuse_size;
 struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
 
 static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
        [TEGRA_REVISION_UNKNOWN] = "unknown",
index cea8ea3491d2c38fb1b5196b783fd5bd08c01a0c..1a07bf540fecc384ef44112e0798c1afd798e9dd 100644 (file)
@@ -26,6 +26,7 @@ static const struct of_device_id realview_soc_of_match[] = {
        { .compatible = "arm,realview-pb11mp-soc", },
        { .compatible = "arm,realview-pba8-soc", },
        { .compatible = "arm,realview-pbx-soc", },
+       { }
 };
 
 static u32 realview_coreid;
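
of_device_id tables are scanned until an entry with an empty compatible/name, so a missing "{ }" terminator sends the matcher past the end of the array into whatever follows it in memory. The canonical shape (compatible string illustrative):

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-soc", },   /* illustrative */
            { /* sentinel */ }
    };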
index 72e12bad14b9c478a8025db3ef7d31601c083aa4..d0d5542efc06db7a74b46a6a7230a4ce65ba53d5 100644 (file)
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
        chip = dws->cur_chip;
        spi = message->spi;
 
-       if (unlikely(!chip->clk_div))
-               chip->clk_div = dws->max_freq / chip->speed_hz;
-
        if (message->state == ERROR_STATE) {
                message->status = -EIO;
                goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
        if (transfer->speed_hz) {
                speed = chip->speed_hz;
 
-               if (transfer->speed_hz != speed) {
+               if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
                        speed = transfer->speed_hz;
 
                        /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
                dev_err(&spi->dev, "No max speed HZ parameter\n");
                return -EINVAL;
        }
-       chip->speed_hz = spi->max_speed_hz;
 
        chip->tmode = 0; /* Tx & Rx */
        /* Default SPI mode is SCPOL = 0, SCPH = 0 */
index 448216025ce852ed10a95c1d2f689f512d6499bf..831ceb4a91f623e3932f3a684c6226fb35baf50c 100644 (file)
@@ -46,7 +46,7 @@
 
 #define SPI_TCR                        0x08
 
-#define SPI_CTAR(x)            (0x0c + (x * 4))
+#define SPI_CTAR(x)            (0x0c + (((x) & 0x3) * 4))
 #define SPI_CTAR_FMSZ(x)       (((x) & 0x0000000f) << 27)
 #define SPI_CTAR_CPOL(x)       ((x) << 26)
 #define SPI_CTAR_CPHA(x)       ((x) << 25)
@@ -70,7 +70,7 @@
 
 #define SPI_PUSHR              0x34
 #define SPI_PUSHR_CONT         (1 << 31)
-#define SPI_PUSHR_CTAS(x)      (((x) & 0x00000007) << 28)
+#define SPI_PUSHR_CTAS(x)      (((x) & 0x00000003) << 28)
 #define SPI_PUSHR_EOQ          (1 << 27)
 #define SPI_PUSHR_CTCNT        (1 << 26)
 #define SPI_PUSHR_PCS(x)       (((1 << x) & 0x0000003f) << 16)
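
The 0x3 masks imply this DSPI instance exposes four CTAR registers: clamping the index keeps SPI_CTAR(x) from computing an address beyond CTAR3, and the PUSHR CTAS field is narrowed to match so a transfer can never select a transfer-attribute register that does not exist. A hypothetical out-of-range value illustrates the hazard:

    /* before: SPI_CTAR(4) == 0x0c + 4*4 = 0x1c, past the CTAR block;
     * after:  SPI_CTAR(4) == SPI_CTAR(0) == 0x0c, a real register
     */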
index d8a105f76837682b61670567aebe9741732f0f58..9e9e0f971e6c1ff651afae9861afbaf311eebe6c 100644 (file)
@@ -1274,7 +1274,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
        if (status != 0)
                return status;
        write_SSCR0(0, drv_data->ioaddr);
-       clk_disable_unprepare(ssp->clk);
+
+       if (!pm_runtime_suspended(dev))
+               clk_disable_unprepare(ssp->clk);
 
        return 0;
 }
@@ -1288,7 +1290,8 @@ static int pxa2xx_spi_resume(struct device *dev)
        pxa2xx_spi_dma_resume(drv_data);
 
        /* Enable the SSP clock */
-       clk_prepare_enable(ssp->clk);
+       if (!pm_runtime_suspended(dev))
+               clk_prepare_enable(ssp->clk);
 
        /* Restore LPSS private register bits */
        lpss_ssp_setup(drv_data);
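
System-suspend versus runtime-suspend interaction: if the device is already runtime-suspended, its clock is off, and disabling it again would unbalance the prepare/enable counts. Guarding both legs with pm_runtime_suspended() keeps the counts symmetric; a sketch of the idiom:

    static int foo_suspend(struct device *dev)
    {
            if (!pm_runtime_suspended(dev))         /* clk still on? */
                    clk_disable_unprepare(ssp->clk);
            return 0;
    }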
index 39e2c0a55a2865acc6c50354cf90fa27263bc922..f63de781c72959c7c29b8fb2bb215f4e33b28f87 100644 (file)
@@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 
        sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
        txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
        rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
 
        if (!(spi->mode & SPI_CS_HIGH))
                regval |= SIRFSOC_SPI_CS_IDLE_STAT;
index ebcb33df2eb22facb58cebc10277c3ab12925a42..50f20f243981e68b0d007ff714476d0a29a42a80 100644 (file)
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
-                       sg_buf = page_address(vm_page) +
-                               ((size_t)buf & ~PAGE_MASK);
+                       sg_set_page(&sgt->sgl[i], vm_page,
+                                   min, offset_in_page(buf));
                } else {
                        sg_buf = buf;
+                       sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }
 
-               sg_set_buf(&sgt->sgl[i], sg_buf, min);
 
                buf += min;
                len -= min;
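
sg_set_buf() resolves its page via virt_to_page(), which is only valid for linear-map (kmalloc) addresses; a vmalloc buffer is contiguous only virtually, so its page must be looked up chunk by chunk and the intra-page offset passed explicitly. The two cases the loop now distinguishes:

    if (vmalloced_buf) {
            struct page *vm_page = vmalloc_to_page(buf);  /* per page */
            sg_set_page(&sgt->sgl[i], vm_page, min, offset_in_page(buf));
    } else {
            sg_set_buf(&sgt->sgl[i], buf, min);           /* linear map */
    }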
index 4690ae9a267f337aff033246b1bad339d7330a82..9425728b7eb5d2c5adf423bc42618d1dcc66127b 100644 (file)
@@ -86,8 +86,6 @@ source "drivers/staging/gdm72xx/Kconfig"
 
 source "drivers/staging/gdm724x/Kconfig"
 
-source "drivers/staging/imx-drm/Kconfig"
-
 source "drivers/staging/fwserial/Kconfig"
 
 source "drivers/staging/goldfish/Kconfig"
index c780a0e70e151ce0ce55929df910e0c01b3bc97c..bc233dd98a95f99acf7f6d865c45598c3848bed1 100644 (file)
@@ -36,7 +36,6 @@ obj-$(CONFIG_STAGING_BOARD)   += board/
 obj-$(CONFIG_USB_WPAN_HCD)     += ozwpan/
 obj-$(CONFIG_WIMAX_GDM72XX)    += gdm72xx/
 obj-$(CONFIG_LTE_GDM724X)      += gdm724x/
-obj-$(CONFIG_DRM_IMX)          += imx-drm/
 obj-$(CONFIG_FIREWIRE_SERIAL)  += fwserial/
 obj-$(CONFIG_GOLDFISH)         += goldfish/
 obj-$(CONFIG_LUSTRE_FS)                += lustre/
index 07318203a836e86155760c7ec16c595094b37489..e8c98cf570701d4c32a51269c844862aea3c5f70 100644 (file)
@@ -119,7 +119,6 @@ struct ade7758_state {
        u8                      *tx;
        u8                      *rx;
        struct mutex            buf_lock;
-       const struct iio_chan_spec *ade7758_ring_channels;
        struct spi_transfer     ring_xfer[4];
        struct spi_message      ring_msg;
        /*
index abc60067cd720272b345ca53d57e07a182b7ce87..fb373b89dcc2c5f090f0b98b3173e4ffb0d3634f 100644 (file)
@@ -634,9 +634,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
                .scan_index = 0,
                .scan_type = {
@@ -648,9 +645,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
                .scan_index = 1,
                .scan_type = {
@@ -662,9 +656,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
                .scan_index = 2,
                .scan_type = {
@@ -676,9 +668,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
                .scan_index = 3,
                .scan_type = {
@@ -690,9 +680,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 0,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
                .scan_index = 4,
                .scan_type = {
@@ -704,9 +692,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
                .scan_index = 5,
                .scan_type = {
@@ -718,9 +703,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
                .scan_index = 6,
                .scan_type = {
@@ -732,9 +714,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
                .scan_index = 7,
                .scan_type = {
@@ -746,9 +726,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
                .scan_index = 8,
                .scan_type = {
@@ -760,9 +738,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 1,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
                .scan_index = 9,
                .scan_type = {
@@ -774,9 +750,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_VOLTAGE,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
                .scan_index = 10,
                .scan_type = {
@@ -788,9 +761,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_CURRENT,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
                .scan_index = 11,
                .scan_type = {
@@ -802,9 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "apparent_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "apparent",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
                .scan_index = 12,
                .scan_type = {
@@ -816,9 +784,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "active_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "active",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
                .scan_index = 13,
                .scan_type = {
@@ -830,9 +796,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
                .type = IIO_POWER,
                .indexed = 1,
                .channel = 2,
-               .extend_name = "reactive_raw",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+               .extend_name = "reactive",
                .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
                .scan_index = 14,
                .scan_type = {
@@ -873,13 +837,14 @@ static int ade7758_probe(struct spi_device *spi)
                goto error_free_rx;
        }
        st->us = spi;
-       st->ade7758_ring_channels = &ade7758_channels[0];
        mutex_init(&st->buf_lock);
 
        indio_dev->name = spi->dev.driver->name;
        indio_dev->dev.parent = &spi->dev;
        indio_dev->info = &ade7758_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = ade7758_channels;
+       indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
 
        ret = ade7758_configure_ring(indio_dev);
        if (ret)
index c0accf8cce93f1fdbd3ad3a3d4213220ddb87b17..6e90064907427596e8a5ee0d00c12ea046635874 100644 (file)
@@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
  **/
 static int ade7758_ring_preenable(struct iio_dev *indio_dev)
 {
-       struct ade7758_state *st = iio_priv(indio_dev);
        unsigned channel;
 
-       if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+       if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
                return -EINVAL;
 
        channel = find_first_bit(indio_dev->active_scan_mask,
                                 indio_dev->masklength);
 
        ade7758_write_waveform_type(&indio_dev->dev,
-               st->ade7758_ring_channels[channel].address);
+               indio_dev->channels[channel].address);
 
        return 0;
 }
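
Two related fixes: the probe hunk registers the channel table on indio_dev itself (channels/num_channels), retiring the private ade7758_ring_channels copy, and the pre-enable check is un-inverted, since enabling the buffer must fail when no scan element is selected, not when one is:

    if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
            return -EINVAL;   /* nothing selected to scan */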
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
deleted file mode 100644 (file)
index 29636fb..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-TODO:
-- get DRM Maintainer review for this code
-- decide where to put the base driver. It is not specific to a subsystem
-  and would be used by DRM/KMS and media/V4L2
-
-Missing features (not necessarily for moving out of staging):
-
-- Add support for IC (Image converter)
-- Add support for CSI (CMOS Sensor interface)
-- Add support for VDIC (Video Deinterlacer)
-
-Many work-in-progress patches for the above features exist. Contact
-Sascha Hauer <kernel@pengutronix.de> if you are interested in working
-on a specific feature.
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org> and
-Sascha Hauer <kernel@pengutronix.de>
index 9935e66935af191e25d1ee4fee4ff70564228f20..eddef9cd2e1662087a4595cc48382b88c121832f 100644 (file)
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
        if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
                rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL)
                return _FAIL;
 
-       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
        if (psurveyPara == NULL) {
                kfree(ph2c);
                return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
        else
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd == NULL) {
                res = _FAIL;
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
        u8      res = _SUCCESS;
 
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
        u8      res = _SUCCESS;
 
        if (enqueue) {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL) {
                        res = _FAIL;
                        goto exit;
                }
 
-               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
                if (pdrvextra_cmd_parm == NULL) {
                        kfree(ph2c);
                        res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
 
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
 
        u8      res = _SUCCESS;
 
-       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ppscmd == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ppscmd);
                res = _FAIL;
index 5ba5099ec20d1af8fd4415b8dbeab8d3dd1fdcc8..70b1bc3e0e63333abaa5ee2a2f63e4778f3124c2 100644 (file)
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
        pcmdpriv = &padapter->cmdpriv;
 
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
        struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
                        pmlmeext->scan_abort = false;/* reset */
                }
 
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL)
                        goto exit_survey_timer_hdl;
 
-               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
                if (psurveyPara == NULL) {
                        kfree(ph2c);
                        goto exit_survey_timer_hdl;
index 33ccbbbd8ed6903fb8dd5ff61c26b9c89ca0e94c..d300369977fae5e51834ffd7f06962ea97db6195 100644 (file)
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
                return true;
        }
 
-       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL);
+       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
 
        subtype = GetFrameSubType(pframe) >> 4;
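
All of these allocations run in atomic context (timer callbacks such as survey_timer_hdl(), beacon and event processing paths), where GFP_KERNEL may sleep and trip "BUG: scheduling while atomic". GFP_ATOMIC allocates from reserves without sleeping but can fail, so callers must tolerate NULL:

    /* process context, may sleep to reclaim memory: */
    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    /* softirq/timer/spinlock context, never sleeps, may fail: */
    obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
    if (!obj)
            return _FAIL;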
 
index 407a318b09dbe2837dc64573792e6f886cc88d62..2f87150a21b7e2c1b3f09dd5df10d3085c8fda83 100644 (file)
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
        {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+       {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {}      /* Terminating entry */
 };
index b19e4329ba00739503215124ec768e8c99d843c3..73e58d22e325d8780d17470a9a9851a732669f89 100644 (file)
@@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                len = sprintf(buf, "TargetAddress="
                                        "%s:%hu,%hu",
                                        inaddr_any ? conn->local_ip : np->np_ip,
-                                       inaddr_any ? conn->local_port : np->np_port,
+                                       np->np_port,
                                        tpg->tpgt);
                                len += 1;
 
index 8c60a1a1ae8dab96a63a382b572ecb7e83e1baf6..9f93b82340952d452ffe4807be7e06082c1e535a 100644 (file)
@@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        u32 pr_res_mapped_lun = 0;
-       int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+       int all_reg = 0, calling_it_nexus = 0;
+       bool sa_res_key_unmatched = sa_res_key != 0;
        int prh_type = 0, prh_scope = 0;
 
        if (!se_sess)
@@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                        if (!all_reg) {
                                if (pr_reg->pr_res_key != sa_res_key)
                                        continue;
+                               sa_res_key_unmatched = false;
 
                                calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
                                pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                                __core_scsi3_free_registration(dev, pr_reg,
                                        (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
                                                NULL, calling_it_nexus);
-                               released_regs++;
                        } else {
                                /*
                                 * Case for any existing all registrants type
@@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                                if ((sa_res_key) &&
                                     (pr_reg->pr_res_key != sa_res_key))
                                        continue;
+                               sa_res_key_unmatched = false;
 
                                calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
                                if (calling_it_nexus)
@@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                                __core_scsi3_free_registration(dev, pr_reg,
                                        (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
                                                NULL, 0);
-                               released_regs++;
                        }
                        if (!calling_it_nexus)
                                core_scsi3_ua_allocate(pr_reg_nacl,
@@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                 * registered reservation key, then the device server shall
                 * complete the command with RESERVATION CONFLICT status.
                 */
-               if (!released_regs) {
+               if (sa_res_key_unmatched) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
                        return TCM_RESERVATION_CONFLICT;
index 9ea0d5f03f7a566fbbe4191a93ad1312cbcfa5af..be877bf6f7304a88fabb3506d65ac75542471b01 100644 (file)
@@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
         * and let it call back once the write buffers are ready.
         */
        target_add_to_state_list(cmd);
-       if (cmd->data_direction != DMA_TO_DEVICE) {
+       if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
                target_execute_cmd(cmd);
                return 0;
        }
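
A zero-length write has no data-out phase, so waiting for the fabric's write-buffer-ready callback would stall the command forever; length-zero commands are now kicked off immediately regardless of direction:

    if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
            target_execute_cmd(cmd);   /* nothing to transfer first */
            return 0;
    }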
index 1ab0018271c5c622c0b7556fb92a3a1d9ad38e5b..ad09e51ffae4d097109241d9a19b97c97858109b 100644 (file)
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
        struct cpumask allowed_cpus;
+       struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
 static unsigned int cpufreq_dev_count;
 
-/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
-#define NOTIFY_INVALID NULL
-static struct cpufreq_cooling_device *notify_device;
+static LIST_HEAD(cpufreq_dev_list);
 
 /**
  * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
 
        cpufreq_device->cpufreq_state = cooling_state;
        cpufreq_device->cpufreq_val = clip_freq;
-       notify_device = cpufreq_device;
 
        for_each_cpu(cpuid, mask) {
                if (is_cpufreq_valid(cpuid))
                        cpufreq_update_policy(cpuid);
        }
 
-       notify_device = NOTIFY_INVALID;
-
        return 0;
 }
 
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 {
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;
+       struct cpufreq_cooling_device *cpufreq_dev;
 
-       if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
+       if (event != CPUFREQ_ADJUST)
                return 0;
 
-       if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
-               max_freq = notify_device->cpufreq_val;
-       else
-               return 0;
+       mutex_lock(&cooling_cpufreq_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu,
+                                       &cpufreq_dev->allowed_cpus))
+                       continue;
+
+               if (!cpufreq_dev->cpufreq_val)
+                       cpufreq_dev->cpufreq_val = get_cpu_frequency(
+                                       cpumask_any(&cpufreq_dev->allowed_cpus),
+                                       cpufreq_dev->cpufreq_state);
 
-       /* Never exceed user_policy.max */
-       if (max_freq > policy->user_policy.max)
-               max_freq = policy->user_policy.max;
+               max_freq = cpufreq_dev->cpufreq_val;
 
-       if (policy->max != max_freq)
-               cpufreq_verify_within_limits(policy, 0, max_freq);
+               if (policy->max != max_freq)
+                       cpufreq_verify_within_limits(policy, 0, max_freq);
+       }
+       mutex_unlock(&cooling_cpufreq_lock);
 
        return 0;
 }
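
The single global notify_device pointer could describe only one cooling device per notification and raced with concurrent cpufreq_apply_cooling() calls. The rewrite keeps every cooling device on cpufreq_dev_list and lets the CPUFREQ_ADJUST notifier clamp the policy against each device covering policy->cpu, under the list mutex; in outline:

    mutex_lock(&cooling_cpufreq_lock);
    list_for_each_entry(dev, &cpufreq_dev_list, node)
            if (cpumask_test_cpu(policy->cpu, &dev->allowed_cpus))
                    cpufreq_verify_within_limits(policy, 0,
                                                 dev->cpufreq_val);
    mutex_unlock(&cooling_cpufreq_lock);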
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
        cpufreq_dev_count++;
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
        mutex_unlock(&cooling_cpufreq_lock);
 
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
        cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
+       list_del(&cpufreq_dev->node);
        cpufreq_dev_count--;
 
        /* Unregister the notifier for the last cpufreq cooling device */
index 461bf3d033a061833409c1fcc5d6538066d5016b..5a1f1070b702282bb1f4ded1535dbfd3abdb6963 100644 (file)
@@ -459,6 +459,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
        int measure_freq;
        int ret;
 
+       if (!cpufreq_get_current_driver()) {
+               dev_dbg(&pdev->dev, "no cpufreq driver!");
+               return -EPROBE_DEFER;
+       }
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -521,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
                return ret;
        }
 
+       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->thermal_clk)) {
+               ret = PTR_ERR(data->thermal_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to get thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
+       /*
+        * Thermal sensor needs clk on to get correct value, normally
+        * we should enable its clk before taking measurement and disable
+        * clk after measurement is done, but if alarm function is enabled,
+        * hardware will auto measure the temperature periodically, so we
+        * need to keep the clk always on for alarm function.
+        */
+       ret = clk_prepare_enable(data->thermal_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
        data->tz = thermal_zone_device_register("imx_thermal_zone",
                                                IMX_TRIP_NUM,
                                                BIT(IMX_TRIP_PASSIVE), data,
@@ -531,26 +559,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
                ret = PTR_ERR(data->tz);
                dev_err(&pdev->dev,
                        "failed to register thermal zone device %d\n", ret);
+               clk_disable_unprepare(data->thermal_clk);
                cpufreq_cooling_unregister(data->cdev);
                return ret;
        }
 
-       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(data->thermal_clk)) {
-               dev_warn(&pdev->dev, "failed to get thermal clk!\n");
-       } else {
-               /*
-                * Thermal sensor needs clk on to get correct value, normally
-                * we should enable its clk before taking measurement and disable
-                * clk after measurement is done, but if alarm function is enabled,
-                * hardware will auto measure the temperature periodically, so we
-                * need to keep the clk always on for alarm function.
-                */
-               ret = clk_prepare_enable(data->thermal_clk);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
-       }
-
        /* Enable measurements at ~ 10 Hz */
        regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
        measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
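
Two ordering fixes in probe: returning -EPROBE_DEFER when no cpufreq driver is registered yet lets the driver core retry the probe later (the passive trip point is useless without a cpufreq cooling device), and the sensor clock is now fetched and enabled before thermal_zone_device_register(), which can trigger a first measurement as soon as the zone exists. The defer idiom:

    if (!cpufreq_get_current_driver()) {
            dev_dbg(&pdev->dev, "no cpufreq driver!");
            return -EPROBE_DEFER;   /* probe is re-run later */
    }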
index d20dba986f0f614c19f4b2c7061f129ece26ab27..6e9fb62eb8170213149de3800cca3fa858b3a3e1 100644 (file)
@@ -92,7 +92,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
        if (ACPI_FAILURE(status))
                return -EIO;
 
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+       /*
+        * Thermal hysteresis represents a temperature difference.
+        * Kelvin and Celsius have same degree size. So the
+        * conversion here between tenths of degree Kelvin unit
+        * and Milli-Celsius unit is just to multiply 100.
+        */
+       *temp = hyst * 100;
 
        return 0;
 }
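
Worked example of the conversion fix: a GTSH value of hyst = 20 denotes 2.0 kelvin of hysteresis; since a kelvin and a degree Celsius are the same size, a temperature difference converts as 20 x 100 = 2000 milli-Celsius. DECI_KELVIN_TO_MILLI_CELSIUS() additionally subtracts the absolute-zero offset, which is correct for a temperature but wrong for a difference between temperatures.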
index f8eb625b8400334de00ff42903d60321f6ebeba5..62143ba3100182462861040ef3c28ce43ec45d00 100644 (file)
@@ -387,15 +387,18 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
                                int (*get_trend)(void *, long *))
 {
        struct device_node *np, *child, *sensor_np;
+       struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
 
        np = of_find_node_by_name(NULL, "thermal-zones");
        if (!np)
                return ERR_PTR(-ENODEV);
 
-       if (!dev || !dev->of_node)
+       if (!dev || !dev->of_node) {
+               of_node_put(np);
                return ERR_PTR(-EINVAL);
+       }
 
-       sensor_np = dev->of_node;
+       sensor_np = of_node_get(dev->of_node);
 
        for_each_child_of_node(np, child) {
                struct of_phandle_args sensor_specs;
@@ -422,16 +425,21 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
                }
 
                if (sensor_specs.np == sensor_np && id == sensor_id) {
-                       of_node_put(np);
-                       return thermal_zone_of_add_sensor(child, sensor_np,
-                                                         data,
-                                                         get_temp,
-                                                         get_trend);
+                       tzd = thermal_zone_of_add_sensor(child, sensor_np,
+                                                        data,
+                                                        get_temp,
+                                                        get_trend);
+                       of_node_put(sensor_specs.np);
+                       of_node_put(child);
+                       goto exit;
                }
+               of_node_put(sensor_specs.np);
        }
+exit:
+       of_node_put(sensor_np);
        of_node_put(np);
 
-       return ERR_PTR(-ENODEV);
+       return tzd;
 }
 EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
 
@@ -623,6 +631,7 @@ static int thermal_of_populate_trip(struct device_node *np,
 
        /* Required for cooling map matching */
        trip->np = np;
+       of_node_get(np);
 
        return 0;
 }
@@ -730,9 +739,14 @@ finish:
        return tz;
 
 free_tbps:
+       for (i = 0; i < tz->num_tbps; i++)
+               of_node_put(tz->tbps[i].cooling_device);
        kfree(tz->tbps);
 free_trips:
+       for (i = 0; i < tz->ntrips; i++)
+               of_node_put(tz->trips[i].np);
        kfree(tz->trips);
+       of_node_put(gchild);
 free_tz:
        kfree(tz);
        of_node_put(child);
@@ -742,7 +756,13 @@ free_tz:
 
 static inline void of_thermal_free_zone(struct __thermal_zone *tz)
 {
+       int i;
+
+       for (i = 0; i < tz->num_tbps; i++)
+               of_node_put(tz->tbps[i].cooling_device);
        kfree(tz->tbps);
+       for (i = 0; i < tz->ntrips; i++)
+               of_node_put(tz->trips[i].np);
        kfree(tz->trips);
        kfree(tz);
 }
@@ -814,10 +834,13 @@ int __init of_parse_thermal_zones(void)
                        /* attempting to build remaining zones still */
                }
        }
+       of_node_put(np);
 
        return 0;
 
 exit_free:
+       of_node_put(child);
+       of_node_put(np);
        of_thermal_free_zone(tz);
 
        /* no memory available, so free what we have built */
@@ -859,4 +882,5 @@ void of_thermal_destroy_zones(void)
                kfree(zone->ops);
                of_thermal_free_zone(zone->devdata);
        }
+       of_node_put(np);
 }
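
The of_node refcounting rule applied throughout this file: every successful of_find_node_by_name(), of_node_get(), for_each_child_of_node() iteration, or phandle resolution must be balanced by of_node_put() on every exit path, including errors and early returns. Canonical shape:

    np = of_find_node_by_name(NULL, "thermal-zones");
    if (!np)
            return -ENODEV;
    /* ... use np ... */
    of_node_put(np);   /* balance the reference the lookup took */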
index 3f5ad25ddca811cf5a9c61509af9ac8c89550b28..b6be572704a4c7ff97055f1cb273ff3016399469 100644 (file)
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
 
        th_zone = sensor_conf->pzone_data;
 
-       if (th_zone->therm_dev)
-               thermal_zone_device_unregister(th_zone->therm_dev);
+       thermal_zone_device_unregister(th_zone->therm_dev);
 
-       for (i = 0; i < th_zone->cool_dev_size; i++) {
-               if (th_zone->cool_dev[i])
-                       cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-       }
+       for (i = 0; i < th_zone->cool_dev_size; ++i)
+               cpufreq_cooling_unregister(th_zone->cool_dev[i]);
 
        dev_info(sensor_conf->dev,
                "Exynos: Kernel Thermal management unregistered\n");
index 3eb2ed9ea3a4db3a9c7811e06a38669cf9b8588e..158f5aa8dc5d105289e0d1b4b792fc6b28f53a83 100644 (file)
@@ -27,7 +27,7 @@
 #define SENSOR_NAME_LEN        16
 #define MAX_TRIP_COUNT 8
 #define MAX_COOLING_DEVICE 4
-#define MAX_THRESHOLD_LEVS 5
+#define MAX_TRIMINFO_CTRL_REG  2
 
 #define ACTIVE_INTERVAL 500
 #define IDLE_INTERVAL 10000
index acbff14da3a492b776c1e48c2c90461c82779f77..49c09243fd3829628bf8a53af3407c958d66cb11 100644 (file)
@@ -77,16 +77,6 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
        struct exynos_tmu_platform_data *pdata = data->pdata;
        int temp_code;
 
-       if (pdata->cal_mode == HW_MODE)
-               return temp;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210)
-               /* temp should range between 25 and 125 */
-               if (temp < 25 || temp > 125) {
-                       temp_code = -EINVAL;
-                       goto out;
-               }
-
        switch (pdata->cal_type) {
        case TYPE_TWO_POINT_TRIMMING:
                temp_code = (temp - pdata->first_point_trim) *
@@ -101,7 +91,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
                temp_code = temp + pdata->default_temp_offset;
                break;
        }
-out:
+
        return temp_code;
 }
 
@@ -114,16 +104,6 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
        struct exynos_tmu_platform_data *pdata = data->pdata;
        int temp;
 
-       if (pdata->cal_mode == HW_MODE)
-               return temp_code;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210)
-               /* temp_code should range between 75 and 175 */
-               if (temp_code < 75 || temp_code > 175) {
-                       temp = -ENODATA;
-                       goto out;
-               }
-
        switch (pdata->cal_type) {
        case TYPE_TWO_POINT_TRIMMING:
                temp = (temp_code - data->temp_error1) *
@@ -138,18 +118,35 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
                temp = temp_code - pdata->default_temp_offset;
                break;
        }
-out:
+
        return temp;
 }
 
+static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+       const struct exynos_tmu_registers *reg = data->pdata->registers;
+       unsigned int val_irq;
+
+       val_irq = readl(data->base + reg->tmu_intstat);
+       /*
+        * Clear the interrupts.  Please note that the documentation for
+        * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
+        * states that the INTCLEAR register places the bits responsible
+        * for FALL IRQs differently than the INTSTAT register.  Exynos5420
+        * and Exynos5440 documentation is correct (Exynos4210 doesn't
+        * support FALL IRQs at all).
+        */
+       writel(val_irq, data->base + reg->tmu_intclear);
+}
+
 static int exynos_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
        struct exynos_tmu_platform_data *pdata = data->pdata;
        const struct exynos_tmu_registers *reg = pdata->registers;
-       unsigned int status, trim_info = 0, con;
+       unsigned int status, trim_info = 0, con, ctrl;
        unsigned int rising_threshold = 0, falling_threshold = 0;
-       int ret = 0, threshold_code, i, trigger_levs = 0;
+       int ret = 0, threshold_code, i;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
@@ -164,11 +161,17 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
                }
        }
 
-       if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
-               __raw_writel(1, data->base + reg->triminfo_ctrl);
-
-       if (pdata->cal_mode == HW_MODE)
-               goto skip_calib_data;
+       if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
+               for (i = 0; i < reg->triminfo_ctrl_count; i++) {
+                       if (pdata->triminfo_reload[i]) {
+                               ctrl = readl(data->base +
+                                               reg->triminfo_ctrl[i]);
+                               ctrl |= pdata->triminfo_reload[i];
+                               writel(ctrl, data->base +
+                                               reg->triminfo_ctrl[i]);
+                       }
+               }
+       }
 
        /* Save trimming info in order to perform calibration */
        if (data->soc == SOC_ARCH_EXYNOS5440) {
@@ -197,7 +200,7 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
                        trim_info = readl(data->base + reg->triminfo_data);
        }
        data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
-       data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
+       data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
                                EXYNOS_TMU_TEMP_MASK);
 
        if (!data->temp_error1 ||
@@ -207,67 +210,33 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
 
        if (!data->temp_error2)
                data->temp_error2 =
-                       (pdata->efuse_value >> reg->triminfo_85_shift) &
+                       (pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
                        EXYNOS_TMU_TEMP_MASK;
 
-skip_calib_data:
-       if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
-               dev_err(&pdev->dev, "Invalid max trigger level\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       for (i = 0; i < pdata->max_trigger_level; i++) {
-               if (!pdata->trigger_levels[i])
-                       continue;
-
-               if ((pdata->trigger_type[i] == HW_TRIP) &&
-               (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
-                       dev_err(&pdev->dev, "Invalid hw trigger level\n");
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               /* Count trigger levels except the HW trip*/
-               if (!(pdata->trigger_type[i] == HW_TRIP))
-                       trigger_levs++;
-       }
-
        rising_threshold = readl(data->base + reg->threshold_th0);
 
        if (data->soc == SOC_ARCH_EXYNOS4210) {
                /* Write temperature code for threshold */
                threshold_code = temp_to_code(data, pdata->threshold);
-               if (threshold_code < 0) {
-                       ret = threshold_code;
-                       goto out;
-               }
                writeb(threshold_code,
                        data->base + reg->threshold_temp);
-               for (i = 0; i < trigger_levs; i++)
+               for (i = 0; i < pdata->non_hw_trigger_levels; i++)
                        writeb(pdata->trigger_levels[i], data->base +
                        reg->threshold_th0 + i * sizeof(reg->threshold_th0));
 
-               writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
+               exynos_tmu_clear_irqs(data);
        } else {
                /* Write temperature code for rising and falling threshold */
-               for (i = 0;
-               i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
+               for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
                        threshold_code = temp_to_code(data,
                                                pdata->trigger_levels[i]);
-                       if (threshold_code < 0) {
-                               ret = threshold_code;
-                               goto out;
-                       }
                        rising_threshold &= ~(0xff << 8 * i);
                        rising_threshold |= threshold_code << 8 * i;
                        if (pdata->threshold_falling) {
                                threshold_code = temp_to_code(data,
                                                pdata->trigger_levels[i] -
                                                pdata->threshold_falling);
-                               if (threshold_code > 0)
-                                       falling_threshold |=
-                                               threshold_code << 8 * i;
+                               falling_threshold |= threshold_code << 8 * i;
                        }
                }
 
@@ -276,9 +245,7 @@ skip_calib_data:
                writel(falling_threshold,
                                data->base + reg->threshold_th1);
 
-               writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
-                       (reg->intclr_fall_mask << reg->intclr_fall_shift),
-                               data->base + reg->tmu_intclear);
+               exynos_tmu_clear_irqs(data);
 
                /* if last threshold limit is also present */
                i = pdata->max_trigger_level - 1;
@@ -286,10 +253,6 @@ skip_calib_data:
                                (pdata->trigger_type[i] == HW_TRIP)) {
                        threshold_code = temp_to_code(data,
                                                pdata->trigger_levels[i]);
-                       if (threshold_code < 0) {
-                               ret = threshold_code;
-                               goto out;
-                       }
                        if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
                                /* 1-4 level to be assigned in th0 reg */
                                rising_threshold &= ~(0xff << 8 * i);
@@ -325,7 +288,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
        struct exynos_tmu_platform_data *pdata = data->pdata;
        const struct exynos_tmu_registers *reg = pdata->registers;
-       unsigned int con, interrupt_en, cal_val;
+       unsigned int con, interrupt_en;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
@@ -335,15 +298,11 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
        if (pdata->test_mux)
                con |= (pdata->test_mux << reg->test_mux_addr_shift);
 
-       if (pdata->reference_voltage) {
-               con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
-               con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
-       }
+       con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
+       con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
 
-       if (pdata->gain) {
-               con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
-               con |= (pdata->gain << reg->buf_slope_sel_shift);
-       }
+       con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
+       con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
 
        if (pdata->noise_cancel_mode) {
                con &= ~(reg->therm_trip_mode_mask <<
@@ -351,29 +310,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
                con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
        }
 
-       if (pdata->cal_mode == HW_MODE) {
-               con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
-               cal_val = 0;
-               switch (pdata->cal_type) {
-               case TYPE_TWO_POINT_TRIMMING:
-                       cal_val = 3;
-                       break;
-               case TYPE_ONE_POINT_TRIMMING_85:
-                       cal_val = 2;
-                       break;
-               case TYPE_ONE_POINT_TRIMMING_25:
-                       cal_val = 1;
-                       break;
-               case TYPE_NONE:
-                       break;
-               default:
-                       dev_err(&pdev->dev, "Invalid calibration type, using none\n");
-               }
-               con |= cal_val << reg->calib_mode_shift;
-       }
-
        if (on) {
-               con |= (1 << reg->core_en_shift);
+               con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en =
                        pdata->trigger_enable[3] << reg->inten_rise3_shift |
                        pdata->trigger_enable[2] << reg->inten_rise2_shift |
@@ -383,7 +321,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
                        interrupt_en |=
                                interrupt_en << reg->inten_fall0_shift;
        } else {
-               con &= ~(1 << reg->core_en_shift);
+               con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en = 0; /* Disable all interrupts */
        }
        writel(interrupt_en, data->base + reg->tmu_inten);
@@ -404,8 +342,16 @@ static int exynos_tmu_read(struct exynos_tmu_data *data)
        clk_enable(data->clk);
 
        temp_code = readb(data->base + reg->tmu_cur_temp);
-       temp = code_to_temp(data, temp_code);
 
+       if (data->soc == SOC_ARCH_EXYNOS4210)
+               /* temp_code should range between 75 and 175 */
+               if (temp_code < 75 || temp_code > 175) {
+                       temp = -ENODATA;
+                       goto out;
+               }
+
+       temp = code_to_temp(data, temp_code);
+out:
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
 
@@ -465,7 +411,7 @@ static void exynos_tmu_work(struct work_struct *work)
                        struct exynos_tmu_data, irq_work);
        struct exynos_tmu_platform_data *pdata = data->pdata;
        const struct exynos_tmu_registers *reg = pdata->registers;
-       unsigned int val_irq, val_type;
+       unsigned int val_type;
 
        if (!IS_ERR(data->clk_sec))
                clk_enable(data->clk_sec);
@@ -483,9 +429,7 @@ static void exynos_tmu_work(struct work_struct *work)
        clk_enable(data->clk);
 
        /* TODO: take action based on particular interrupt */
-       val_irq = readl(data->base + reg->tmu_intstat);
-       /* clear the interrupts */
-       writel(val_irq, data->base + reg->tmu_intclear);
+       exynos_tmu_clear_irqs(data);
 
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
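
The new exynos_tmu_clear_irqs() helper replaces several open-coded shift/mask variants with the plain write-1-to-clear idiom: read the latched status, write that same value to the clear register, and only interrupts actually observed get acknowledged. Stripped to its core (a sketch, register offsets passed in rather than hard-coded):

static void tmu_clear_irqs(void __iomem *base, u32 stat_off, u32 clear_off)
{
        u32 pending = readl(base + stat_off);

        /* Ack exactly the bits we saw; anything newer stays pending. */
        writel(pending, base + clear_off);
}
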
index 1b4a6444ea617574ae817b157f76d47329ddb2a7..c58c7663a3feaefec4f777d47220288976c95be5 100644 (file)
@@ -34,11 +34,6 @@ enum calibration_type {
        TYPE_NONE,
 };
 
-enum calibration_mode {
-       SW_MODE,
-       HW_MODE,
-};
-
 enum soc_type {
        SOC_ARCH_EXYNOS3250 = 1,
        SOC_ARCH_EXYNOS4210,
@@ -82,46 +77,19 @@ enum soc_type {
  * bitfields. The register validity, offsets and bitfield values may vary
  * slightly across different Exynos SoCs.
  * @triminfo_data: register containing 2-point trimming data
- * @triminfo_25_shift: shift bit of the 25 C trim value in triminfo_data reg.
- * @triminfo_85_shift: shift bit of the 85 C trim value in triminfo_data reg.
  * @triminfo_ctrl: trim info controller register.
- * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
-       reg.
+ * @triminfo_ctrl_count: the number of trim info controller registers.
  * @tmu_ctrl: TMU main controller register.
  * @test_mux_addr_shift: shift bits of test mux address.
- * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
- * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
  * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
  * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
  * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
- * @buf_slope_sel_shift: shift bits of amplifier gain value in tmu_ctrl
-       register.
- * @buf_slope_sel_mask: mask bits of amplifier gain value in tmu_ctrl register.
- * @calib_mode_shift: shift bits of calibration mode value in tmu_ctrl
-       register.
- * @calib_mode_mask: mask bits of calibration mode value in tmu_ctrl
-       register.
- * @therm_trip_tq_en_shift: shift bits of thermal trip enable by TQ pin in
-       tmu_ctrl register.
- * @core_en_shift: shift bits of TMU core enable bit in tmu_ctrl register.
  * @tmu_status: register describing the TMU status.
  * @tmu_cur_temp: register containing the current temperature of the TMU.
- * @tmu_cur_temp_shift: shift bits of current temp value in tmu_cur_temp
-       register.
  * @threshold_temp: register containing the base threshold level.
  * @threshold_th0: Register containing first set of rising levels.
- * @threshold_th0_l0_shift: shift bits of level0 threshold temperature.
- * @threshold_th0_l1_shift: shift bits of level1 threshold temperature.
- * @threshold_th0_l2_shift: shift bits of level2 threshold temperature.
- * @threshold_th0_l3_shift: shift bits of level3 threshold temperature.
  * @threshold_th1: Register containing second set of rising levels.
- * @threshold_th1_l0_shift: shift bits of level0 threshold temperature.
- * @threshold_th1_l1_shift: shift bits of level1 threshold temperature.
- * @threshold_th1_l2_shift: shift bits of level2 threshold temperature.
- * @threshold_th1_l3_shift: shift bits of level3 threshold temperature.
  * @threshold_th2: Register containing third set of rising levels.
- * @threshold_th2_l0_shift: shift bits of level0 threshold temperature.
- * @threshold_th3: Register containing fourth set of rising levels.
  * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
  * @tmu_inten: register containing the different threshold interrupt
        enable bits.
@@ -130,68 +98,35 @@ enum soc_type {
  * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
  * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
  * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
- * @inten_fall1_shift: shift bits of falling 1 interrupt bits.
- * @inten_fall2_shift: shift bits of falling 2 interrupt bits.
- * @inten_fall3_shift: shift bits of falling 3 interrupt bits.
  * @tmu_intstat: Register containing the interrupt status values.
  * @tmu_intclear: Register for clearing the raised interrupt status.
- * @intclr_fall_shift: shift bits for interrupt clear fall 0
- * @intclr_rise_shift: shift bits of all rising interrupt bits.
- * @intclr_rise_mask: mask bits of all rising interrupt bits.
- * @intclr_fall_mask: mask bits of all rising interrupt bits.
  * @emul_con: TMU emulation controller register.
  * @emul_temp_shift: shift bits of emulation temperature.
  * @emul_time_shift: shift bits of emulation time.
- * @emul_time_mask: mask bits of emulation time.
  * @tmu_irqstatus: register to find which TMU generated interrupts.
  * @tmu_pmin: register to get/set the Pmin value.
  */
 struct exynos_tmu_registers {
        u32     triminfo_data;
-       u32     triminfo_25_shift;
-       u32     triminfo_85_shift;
 
-       u32     triminfo_ctrl;
-       u32     triminfo_ctrl1;
-       u32     triminfo_reload_shift;
+       u32     triminfo_ctrl[MAX_TRIMINFO_CTRL_REG];
+       u32     triminfo_ctrl_count;
 
        u32     tmu_ctrl;
        u32     test_mux_addr_shift;
-       u32     buf_vref_sel_shift;
-       u32     buf_vref_sel_mask;
        u32     therm_trip_mode_shift;
        u32     therm_trip_mode_mask;
        u32     therm_trip_en_shift;
-       u32     buf_slope_sel_shift;
-       u32     buf_slope_sel_mask;
-       u32     calib_mode_shift;
-       u32     calib_mode_mask;
-       u32     therm_trip_tq_en_shift;
-       u32     core_en_shift;
 
        u32     tmu_status;
 
        u32     tmu_cur_temp;
-       u32     tmu_cur_temp_shift;
 
        u32     threshold_temp;
 
        u32     threshold_th0;
-       u32     threshold_th0_l0_shift;
-       u32     threshold_th0_l1_shift;
-       u32     threshold_th0_l2_shift;
-       u32     threshold_th0_l3_shift;
-
        u32     threshold_th1;
-       u32     threshold_th1_l0_shift;
-       u32     threshold_th1_l1_shift;
-       u32     threshold_th1_l2_shift;
-       u32     threshold_th1_l3_shift;
-
        u32     threshold_th2;
-       u32     threshold_th2_l0_shift;
-
-       u32     threshold_th3;
        u32     threshold_th3_l0_shift;
 
        u32     tmu_inten;
@@ -200,22 +135,14 @@ struct exynos_tmu_registers {
        u32     inten_rise2_shift;
        u32     inten_rise3_shift;
        u32     inten_fall0_shift;
-       u32     inten_fall1_shift;
-       u32     inten_fall2_shift;
-       u32     inten_fall3_shift;
 
        u32     tmu_intstat;
 
        u32     tmu_intclear;
-       u32     intclr_fall_shift;
-       u32     intclr_rise_shift;
-       u32     intclr_fall_mask;
-       u32     intclr_rise_mask;
 
        u32     emul_con;
        u32     emul_temp_shift;
        u32     emul_time_shift;
-       u32     emul_time_mask;
 
        u32     tmu_irqstatus;
        u32     tmu_pmin;
@@ -250,11 +177,12 @@ struct exynos_tmu_registers {
  *     1 = enable trigger_level[] interrupt,
  *     0 = disable trigger_level[] interrupt
  * @max_trigger_level: max trigger level supported by the TMU
+ * @non_hw_trigger_levels: number of defined non-hardware trigger levels
  * @gain: gain of amplifier in the positive-TC generator block
- *     0 <= gain <= 15
+ *     0 < gain <= 15
  * @reference_voltage: reference voltage of amplifier
  *     in the positive-TC generator block
- *     0 <= reference_voltage <= 31
+ *     0 < reference_voltage <= 31
  * @noise_cancel_mode: noise cancellation mode
  *     000, 100, 101, 110 and 111 can be different modes
  * @type: determines the type of SOC
@@ -265,8 +193,8 @@ struct exynos_tmu_registers {
  * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
  * @test_mux: information if SoC supports test MUX
+ * @triminfo_reload: reload value to read TRIMINFO register
  * @cal_type: calibration type for temperature
- * @cal_mode: calibration mode for temperature
  * @freq_clip_table: Table representing frequency reduction percentage.
  * @freq_tab_count: Count of the above table as frequency reduction may
  *     be applicable to only some of the trigger levels.
@@ -284,6 +212,7 @@ struct exynos_tmu_platform_data {
        enum trigger_type trigger_type[MAX_TRIP_COUNT];
        bool trigger_enable[MAX_TRIP_COUNT];
        u8 max_trigger_level;
+       u8 non_hw_trigger_levels;
        u8 gain;
        u8 reference_voltage;
        u8 noise_cancel_mode;
@@ -295,9 +224,9 @@ struct exynos_tmu_platform_data {
        u8 second_point_trim;
        u8 default_temp_offset;
        u8 test_mux;
+       u8 triminfo_reload[MAX_TRIMINFO_CTRL_REG];
 
        enum calibration_type cal_type;
-       enum calibration_mode cal_mode;
        enum soc_type type;
        struct freq_clip_table freq_tab[4];
        unsigned int freq_tab_count;
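
The header shrinks because per-SoC variation moved from one named field per register into a small array plus a count, letting the code iterate data instead of sprouting special cases. A sketch of a per-SoC table under the new layout (name and offsets hypothetical, matching the loop added to exynos_tmu_initialize() above):

static const struct exynos_tmu_registers foo_tmu_registers = {
        .triminfo_ctrl[0]       = 0x10, /* hypothetical offsets */
        .triminfo_ctrl[1]       = 0x14,
        .triminfo_ctrl_count    = 2,
};
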
index aa8e0dee2055d73c74719a912ef0595a1739ab9a..1724f6cdaef8f85603da7cbf99c225b0053dc055 100644 (file)
 #if defined(CONFIG_CPU_EXYNOS4210)
 static const struct exynos_tmu_registers exynos4210_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS_TMU_REG_STATUS,
        .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
        .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
@@ -46,7 +39,6 @@ static const struct exynos_tmu_registers exynos4210_tmu_registers = {
        .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
        .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
        .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-       .intclr_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
 };
 
 struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
@@ -64,6 +56,7 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
                .trigger_type[1] = THROTTLE_ACTIVE,
                .trigger_type[2] = SW_TRIP,
                .max_trigger_level = 4,
+               .non_hw_trigger_levels = 3,
                .gain = 15,
                .reference_voltage = 7,
                .cal_type = TYPE_ONE_POINT_TRIMMING,
@@ -93,18 +86,14 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
 #if defined(CONFIG_SOC_EXYNOS3250)
 static const struct exynos_tmu_registers exynos3250_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+       .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON1,
+       .triminfo_ctrl[1] = EXYNOS_TMU_TRIMINFO_CON2,
+       .triminfo_ctrl_count = 2,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
        .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS_TMU_REG_STATUS,
        .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
        .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -116,14 +105,9 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
        .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
        .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
        .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-       .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
-       .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
-       .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
-       .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
        .emul_con = EXYNOS_EMUL_CON,
        .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
        .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-       .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
 #define EXYNOS3250_TMU_DATA \
@@ -141,6 +125,7 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
        .trigger_type[2] = SW_TRIP, \
        .trigger_type[3] = HW_TRIP, \
        .max_trigger_level = 4, \
+       .non_hw_trigger_levels = 3, \
        .gain = 8, \
        .reference_voltage = 16, \
        .noise_cancel_mode = 4, \
@@ -160,8 +145,10 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
                .temp_level = 95, \
        }, \
        .freq_tab_count = 2, \
+       .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
+       .triminfo_reload[1] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
        .registers = &exynos3250_tmu_registers, \
-       .features = (TMU_SUPPORT_EMULATION | \
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
                        TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
                        TMU_SUPPORT_EMUL_TIME)
 #endif
@@ -182,20 +169,13 @@ struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
 #if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
 static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
-       .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
-       .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
+       .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON2,
+       .triminfo_ctrl_count = 1,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
        .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS_TMU_REG_STATUS,
        .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
        .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -208,14 +188,9 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
        .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
        .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-       .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
-       .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
-       .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
-       .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
        .emul_con = EXYNOS_EMUL_CON,
        .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
        .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-       .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
 #define EXYNOS4412_TMU_DATA \
@@ -233,6 +208,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .trigger_type[2] = SW_TRIP, \
        .trigger_type[3] = HW_TRIP, \
        .max_trigger_level = 4, \
+       .non_hw_trigger_levels = 3, \
        .gain = 8, \
        .reference_voltage = 16, \
        .noise_cancel_mode = 4, \
@@ -252,6 +228,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
                .temp_level = 95, \
        }, \
        .freq_tab_count = 2, \
+       .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
        .registers = &exynos4412_tmu_registers, \
        .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
                        TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
@@ -286,18 +263,10 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
 #if defined(CONFIG_SOC_EXYNOS5260)
 static const struct exynos_tmu_registers exynos5260_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
-       .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS_TMU_REG_STATUS,
        .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
        .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -310,14 +279,9 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
        .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
        .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT,
        .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR,
-       .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
-       .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
-       .intclr_rise_mask = EXYNOS5260_TMU_RISE_INT_MASK,
-       .intclr_fall_mask = EXYNOS5260_TMU_FALL_INT_MASK,
        .emul_con = EXYNOS5260_EMUL_CON,
        .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
        .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-       .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
 #define __EXYNOS5260_TMU_DATA  \
@@ -335,6 +299,7 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
        .trigger_type[2] = SW_TRIP, \
        .trigger_type[3] = HW_TRIP, \
        .max_trigger_level = 4, \
+       .non_hw_trigger_levels = 3, \
        .gain = 8, \
        .reference_voltage = 16, \
        .noise_cancel_mode = 4, \
@@ -359,9 +324,8 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
 #define EXYNOS5260_TMU_DATA \
        __EXYNOS5260_TMU_DATA \
        .type = SOC_ARCH_EXYNOS5260, \
-       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
-                       TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
-                       TMU_SUPPORT_EMUL_TIME)
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
+                       TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
 
 struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
        .tmu_data = {
@@ -378,17 +342,10 @@ struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
 #if defined(CONFIG_SOC_EXYNOS5420)
 static const struct exynos_tmu_registers exynos5420_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS_TMU_REG_STATUS,
        .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
        .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -402,14 +359,9 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
        .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
        .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
        .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-       .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
-       .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
-       .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
-       .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
        .emul_con = EXYNOS_EMUL_CON,
        .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
        .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-       .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
 #define __EXYNOS5420_TMU_DATA  \
@@ -427,6 +379,7 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
        .trigger_type[2] = SW_TRIP, \
        .trigger_type[3] = HW_TRIP, \
        .max_trigger_level = 4, \
+       .non_hw_trigger_levels = 3, \
        .gain = 8, \
        .reference_voltage = 16, \
        .noise_cancel_mode = 4, \
@@ -451,16 +404,15 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
 #define EXYNOS5420_TMU_DATA \
        __EXYNOS5420_TMU_DATA \
        .type = SOC_ARCH_EXYNOS5250, \
-       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
-                       TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
-                       TMU_SUPPORT_EMUL_TIME)
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
+                       TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
 
 #define EXYNOS5420_TMU_DATA_SHARED \
        __EXYNOS5420_TMU_DATA \
        .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \
-       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
-                       TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
-                       TMU_SUPPORT_EMUL_TIME | TMU_SUPPORT_ADDRESS_MULTIPLE)
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
+                       TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME | \
+                       TMU_SUPPORT_ADDRESS_MULTIPLE)
 
 struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
        .tmu_data = {
@@ -477,19 +429,10 @@ struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
 #if defined(CONFIG_SOC_EXYNOS5440)
 static const struct exynos_tmu_registers exynos5440_tmu_registers = {
        .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
-       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
-       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
-       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
-       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
-       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
-       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
-       .calib_mode_shift = EXYNOS_TMU_CALIB_MODE_SHIFT,
-       .calib_mode_mask = EXYNOS_TMU_CALIB_MODE_MASK,
-       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
        .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
        .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
        .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
@@ -504,10 +447,6 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
        .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
        .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
        .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
-       .intclr_fall_shift = EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT,
-       .intclr_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
-       .intclr_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
-       .intclr_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
        .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
        .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
        .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
@@ -521,11 +460,11 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
        .trigger_type[0] = SW_TRIP, \
        .trigger_type[4] = HW_TRIP, \
        .max_trigger_level = 5, \
+       .non_hw_trigger_levels = 1, \
        .gain = 5, \
        .reference_voltage = 16, \
        .noise_cancel_mode = 4, \
        .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .cal_mode = 0, \
        .efuse_value = 0x5b2d, \
        .min_efuse_value = 16, \
        .max_efuse_value = 76, \
index f0979e598491cc80f3bd0c3782090b06b172e223..63de598c9c2c3f9b8804110f086ed384991f547a 100644 (file)
 #define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
 #define EXYNOS_TMU_CORE_EN_SHIFT       0
 
+/* Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON1       0x10
+
 /* Exynos4210 specific registers */
 #define EXYNOS4210_TMU_REG_THRESHOLD_TEMP      0x44
 #define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
-#define EXYNOS4210_TMU_REG_PAST_TEMP0  0x60
-#define EXYNOS4210_TMU_REG_PAST_TEMP1  0x64
-#define EXYNOS4210_TMU_REG_PAST_TEMP2  0x68
-#define EXYNOS4210_TMU_REG_PAST_TEMP3  0x6C
-
-#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK        0x1
-#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK        0x10
-#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK        0x100
-#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK        0x1000
-#define EXYNOS4210_TMU_TRIG_LEVEL_MASK 0x1111
-#define EXYNOS4210_TMU_INTCLEAR_VAL    0x1111
-
-/* Exynos5250 and Exynos4412 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON        0x14
+
+/* Exynos5250, Exynos4412, Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON2       0x14
 #define EXYNOS_THD_TEMP_RISE           0x50
 #define EXYNOS_THD_TEMP_FALL           0x54
 #define EXYNOS_EMUL_CON                0x80
 
-#define EXYNOS_TRIMINFO_RELOAD_SHIFT   1
+#define EXYNOS_TRIMINFO_RELOAD_ENABLE  1
 #define EXYNOS_TRIMINFO_25_SHIFT       0
 #define EXYNOS_TRIMINFO_85_SHIFT       8
-#define EXYNOS_TMU_RISE_INT_MASK       0x111
-#define EXYNOS_TMU_RISE_INT_SHIFT      0
-#define EXYNOS_TMU_FALL_INT_MASK       0x111
-#define EXYNOS_TMU_CLEAR_RISE_INT      0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT      (0x111 << 12)
-#define EXYNOS_TMU_CLEAR_FALL_INT_SHIFT        12
-#define EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT    16
-#define EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT    4
 #define EXYNOS_TMU_TRIP_MODE_SHIFT     13
 #define EXYNOS_TMU_TRIP_MODE_MASK      0x7
 #define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
-#define EXYNOS_TMU_CALIB_MODE_SHIFT    4
-#define EXYNOS_TMU_CALIB_MODE_MASK     0x3
 
 #define EXYNOS_TMU_INTEN_RISE0_SHIFT   0
 #define EXYNOS_TMU_INTEN_RISE1_SHIFT   4
 #define EXYNOS_TMU_INTEN_RISE2_SHIFT   8
 #define EXYNOS_TMU_INTEN_RISE3_SHIFT   12
 #define EXYNOS_TMU_INTEN_FALL0_SHIFT   16
-#define EXYNOS_TMU_INTEN_FALL1_SHIFT   20
-#define EXYNOS_TMU_INTEN_FALL2_SHIFT   24
-#define EXYNOS_TMU_INTEN_FALL3_SHIFT   28
 
 #define EXYNOS_EMUL_TIME       0x57F0
 #define EXYNOS_EMUL_TIME_MASK  0xffff
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
 /* Exynos5260 specific */
-#define EXYNOS_TMU_REG_CONTROL1                        0x24
 #define EXYNOS5260_TMU_REG_INTEN               0xC0
 #define EXYNOS5260_TMU_REG_INTSTAT             0xC4
 #define EXYNOS5260_TMU_REG_INTCLEAR            0xC8
-#define EXYNOS5260_TMU_CLEAR_RISE_INT          0x1111
-#define EXYNOS5260_TMU_CLEAR_FALL_INT          (0x1111 << 16)
-#define EXYNOS5260_TMU_RISE_INT_MASK           0x1111
-#define EXYNOS5260_TMU_FALL_INT_MASK           0x1111
 #define EXYNOS5260_EMUL_CON                    0x100
 
 /* Exynos4412 specific */
 #define EXYNOS5440_TMU_S0_7_TH0                        0x110
 #define EXYNOS5440_TMU_S0_7_TH1                        0x130
 #define EXYNOS5440_TMU_S0_7_TH2                        0x150
-#define EXYNOS5440_TMU_S0_7_EVTEN              0x1F0
 #define EXYNOS5440_TMU_S0_7_IRQEN              0x210
 #define EXYNOS5440_TMU_S0_7_IRQ                        0x230
 /* exynos5440 common registers */
 #define EXYNOS5440_TMU_IRQ_STATUS              0x000
 #define EXYNOS5440_TMU_PMIN                    0x004
-#define EXYNOS5440_TMU_TEMP                    0x008
 
-#define EXYNOS5440_TMU_RISE_INT_MASK           0xf
-#define EXYNOS5440_TMU_RISE_INT_SHIFT          0
-#define EXYNOS5440_TMU_FALL_INT_MASK           0xf
 #define EXYNOS5440_TMU_INTEN_RISE0_SHIFT       0
 #define EXYNOS5440_TMU_INTEN_RISE1_SHIFT       1
 #define EXYNOS5440_TMU_INTEN_RISE2_SHIFT       2
 #define EXYNOS5440_TMU_INTEN_RISE3_SHIFT       3
 #define EXYNOS5440_TMU_INTEN_FALL0_SHIFT       4
-#define EXYNOS5440_TMU_INTEN_FALL1_SHIFT       5
-#define EXYNOS5440_TMU_INTEN_FALL2_SHIFT       6
-#define EXYNOS5440_TMU_INTEN_FALL3_SHIFT       7
-#define EXYNOS5440_TMU_TH_RISE0_SHIFT          0
-#define EXYNOS5440_TMU_TH_RISE1_SHIFT          8
-#define EXYNOS5440_TMU_TH_RISE2_SHIFT          16
-#define EXYNOS5440_TMU_TH_RISE3_SHIFT          24
 #define EXYNOS5440_TMU_TH_RISE4_SHIFT          24
 #define EXYNOS5440_EFUSE_SWAP_OFFSET           8
 
index 90163b384660247b343d9fc551dfe067128d6219..d1ec5804c0bb94cebeb22d5038b4861393047ba0 100644 (file)
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(st_thermal_unregister);
 
+#ifdef CONFIG_PM_SLEEP
 static int st_thermal_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
 
        return 0;
 }
+#endif
+
 SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
 EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
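
Wrapping the callbacks in CONFIG_PM_SLEEP matches how SIMPLE_DEV_PM_OPS() behaves: its SET_SYSTEM_SLEEP_PM_OPS() expansion drops the function pointers when sleep support is compiled out, which would otherwise leave st_thermal_suspend/resume defined but unused. The canonical shape, with hypothetical foo_* callbacks:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { /* quiesce hw */ return 0; }
static int foo_resume(struct device *dev)  { /* restore hw */ return 0; }
#endif

/* Expands to an empty ops table when CONFIG_PM_SLEEP is not set. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
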
 
index 9bf10aa6069bbed102735da623ebfedb711e5d20..43b90709585ff294713e299cca1b6cdac406c098 100644 (file)
@@ -1575,8 +1575,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
 
        thermal_zone_device_update(tz);
 
-       if (!result)
-               return tz;
+       return tz;
 
 unregister:
        release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
index 89c4cee253e34419006b670089cc54b0257dde52..2e900a98c3e34e9d732deee15cb009db2372d3c9 100644 (file)
@@ -2413,12 +2413,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
+       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+               mask |= POLLHUP;
        if (input_available_p(tty, 1))
                mask |= POLLIN | POLLRDNORM;
+       else if (mask & POLLHUP) {
+               tty_flush_to_ldisc(tty);
+               if (input_available_p(tty, 1))
+                       mask |= POLLIN | POLLRDNORM;
+       }
        if (tty->packet && tty->link->ctrl_status)
                mask |= POLLPRI | POLLIN | POLLRDNORM;
-       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-               mask |= POLLHUP;
        if (tty_hung_up_p(file))
                mask |= POLLHUP;
        if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
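
Computing POLLHUP before the input test is what makes the new branch possible: if the other end has closed but data may still sit in the flip buffers, the poll path flushes once more to the line discipline and re-checks, so a reader cannot miss a final burst of input. Schematically (helper names hypothetical):

unsigned int mask = 0;

if (other_side_closed())
        mask |= POLLHUP;
if (input_ready())
        mask |= POLLIN | POLLRDNORM;
else if (mask & POLLHUP) {
        flush_pending_to_ldisc();       /* may surface buffered bytes */
        if (input_ready())
                mask |= POLLIN | POLLRDNORM;
}
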
index 8f37d57165ecf49866b839c301fe1759c34551a9..de7aae523b3728c10abc6c4000bd9b5f44786083 100644 (file)
@@ -81,7 +81,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
                /* Set to highest baudrate supported */
                if (baud >= 1152000)
                        baud = 921600;
-               quot = DIV_ROUND_CLOSEST(port->uartclk, 256 * baud);
+               quot = (port->uartclk / (256 * baud)) + 1;
        }
 
        /*
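
The divisor change trades round-to-nearest for round-up, which guarantees a nonzero quot and an actual rate at or below the requested one. With illustrative numbers (a 26 MHz uartclk and baud capped to 921600 as in the hunk above):

unsigned int clk  = 26000000;   /* illustrative */
unsigned int baud = 921600;

quot = DIV_ROUND_CLOSEST(clk, 256 * baud);      /* = 0: invalid divisor */
quot = clk / (256 * baud) + 1;                  /* = 1: safe, slightly slow */
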
index 8bc2563335ae34ba2ae45bf55117c808d0b219c5..bf355050eab695f50c6220589faf69a0b9e3b6b6 100644 (file)
@@ -158,7 +158,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
        if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
                return -EBUSY;
 
-       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                return -ENOMEM;
 
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int of_serial_suspend(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       serial8250_suspend_port(info->line);
-       if (info->clk)
-               clk_disable_unprepare(info->clk);
-
-       return 0;
-}
-
-static int of_serial_resume(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       if (info->clk)
-               clk_prepare_enable(info->clk);
-
-       serial8250_resume_port(info->line);
-
-       return 0;
-}
-#endif
-static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
-
 /*
  * A few common types, add more as needed.
  */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
                .name = "of_serial",
                .owner = THIS_MODULE,
                .of_match_table = of_platform_serial_table,
-               .pm = &of_serial_pm_ops,
        },
        .probe = of_platform_serial_probe,
        .remove = of_platform_serial_remove,
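
Switching the info allocation to kzalloc() means any of_serial_info field that probe does not explicitly set, such as a clock pointer on boards without one, reads as zero instead of heap garbage when later code tests it:

info = kzalloc(sizeof(*info), GFP_KERNEL);      /* zeroed, unlike kmalloc() */
if (info == NULL)
        return -ENOMEM;
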
index df3a8c74358e45644765da1593d19ffd44d6664a..eaeb9a02c7feace54a99df7acc6bf9544b6bc512 100644 (file)
@@ -363,7 +363,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
                 * Die! Die! Die!
                 */
-               if (baud == 38400)
+               if (try == 0 && baud == 38400)
                        baud = altbaud;
 
                /*
index 16a2c0237dd60a06513e2ad1f616e13a7bd71c0b..0508a1d8e4cd73a7b679f9cc70d2e19f0716b69a 100644 (file)
@@ -1709,6 +1709,8 @@ int tty_release(struct inode *inode, struct file *filp)
        int     pty_master, tty_closing, o_tty_closing, do_sleep;
        int     idx;
        char    buf[64];
+       long    timeout = 0;
+       int     once = 1;
 
        if (tty_paranoia_check(tty, inode, __func__))
                return 0;
@@ -1789,11 +1791,18 @@ int tty_release(struct inode *inode, struct file *filp)
                if (!do_sleep)
                        break;
 
-               printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
-                               __func__, tty_name(tty, buf));
+               if (once) {
+                       once = 0;
+                       printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+                              __func__, tty_name(tty, buf));
+               }
                tty_unlock_pair(tty, o_tty);
                mutex_unlock(&tty_mutex);
-               schedule();
+               schedule_timeout_killable(timeout);
+               if (timeout < 120 * HZ)
+                       timeout = 2 * timeout + 1;
+               else
+                       timeout = MAX_SCHEDULE_TIMEOUT;
        }
 
        /*
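
Instead of rescheduling in a tight loop and spamming the log, the release path now warns once and backs off exponentially, passing 0, 1, 3, 7, ... jiffies to schedule_timeout_killable() until it caps out past 120*HZ. The backoff in isolation (done() hypothetical):

long timeout = 0;

while (!done()) {
        schedule_timeout_killable(timeout);
        if (timeout < 120 * HZ)
                timeout = 2 * timeout + 1;      /* 0, 1, 3, 7, 15, ... */
        else
                timeout = MAX_SCHEDULE_TIMEOUT;
}
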
index 610b720d3b91789b6f55c618b08535d53edc99c3..59b25e039968596372764e9c5ddd6b92f1a8e045 100644 (file)
@@ -539,6 +539,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 
        /* Save original vc_unipagdir_loc in case we allocate a new one */
        p = *vc->vc_uni_pagedir_loc;
+
+       if (!p) {
+               err = -EINVAL;
+
+               goto out_unlock;
+       }
        
        if (p->refcount > 1) {
                int j, k;
@@ -623,6 +629,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
                set_inverse_transl(vc, p, i); /* Update inverse translations */
        set_inverse_trans_unicode(vc, p);
 
+out_unlock:
        console_unlock();
        return err;
 }
index e934e19f49f57e3e463c31c65caa76cca3fee905..077d58ac3dcba6db7b4ed554cac493e74a6de71a 100644 (file)
@@ -60,6 +60,9 @@ static struct acm *acm_table[ACM_TTY_MINORS];
 
 static DEFINE_MUTEX(acm_table_lock);
 
+static void acm_tty_set_termios(struct tty_struct *tty,
+                               struct ktermios *termios_old);
+
 /*
  * acm_table accessors
  */
@@ -145,8 +148,15 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
 /* devices aren't required to support these requests.
  * the cdc acm descriptor tells whether they do...
  */
-#define acm_set_control(acm, control) \
-       acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0)
+static inline int acm_set_control(struct acm *acm, int control)
+{
+       if (acm->quirks & QUIRK_CONTROL_LINE_STATE)
+               return -EOPNOTSUPP;
+
+       return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE,
+                       control, NULL, 0);
+}
+
 #define acm_set_line(acm, line) \
        acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
 #define acm_send_break(acm, ms) \
@@ -554,6 +564,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
                goto error_submit_urb;
        }
 
+       acm_tty_set_termios(tty, NULL);
+
        /*
         * Unthrottle device in case the TTY was closed while throttled.
         */
@@ -980,11 +992,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
        /* FIXME: Needs to clear unsupported bits in the termios */
        acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
 
-       if (!newline.dwDTERate) {
+       if (C_BAUD(tty) == B0) {
                newline.dwDTERate = acm->line.dwDTERate;
                newctrl &= ~ACM_CTRL_DTR;
-       } else
+       } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
                newctrl |=  ACM_CTRL_DTR;
+       }
 
        if (newctrl != acm->ctrlout)
                acm_set_control(acm, acm->ctrlout = newctrl);
@@ -1314,6 +1327,7 @@ made_compressed_probe:
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
        init_usb_anchor(&acm->delayed);
+       acm->quirks = quirks;
 
        buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
        if (!buf) {
@@ -1681,6 +1695,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
+       { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
+       .driver_info = QUIRK_CONTROL_LINE_STATE, },
+       { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
        /* Motorola H24 HSPA module: */
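
Turning the acm_set_control() macro (earlier in this file's diff) into a static inline gives the new quirk a single choke point: devices flagged QUIRK_CONTROL_LINE_STATE get -EOPNOTSUPP before any control transfer is attempted, and the compiler now type-checks the arguments. The shape of the idiom, with hypothetical names:

/* Macro: nowhere to hang per-device policy. */
#define set_ctrl(d, v)  ctrl_msg((d), REQ_SET_CTRL, (v))

/* Inline: same call sites, plus a guard and real type checking. */
static inline int set_ctrl(struct dev *d, int v)
{
        if (d->quirks & QUIRK_NO_CTRL)
                return -EOPNOTSUPP;
        return ctrl_msg(d, REQ_SET_CTRL, v);
}
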
index fc75651afe1c9cda8983aa82dfa8b81cfa3fb5bf..d3251ebd09e2e0bc46effe8b77503c524d12a7d4 100644 (file)
@@ -121,6 +121,7 @@ struct acm {
        unsigned int throttle_req:1;                    /* throttle requested */
        u8 bInterval;
        struct usb_anchor delayed;                      /* writes queued for a device about to be woken */
+       unsigned long quirks;
 };
 
 #define CDC_DATA_INTERFACE_TYPE        0x0a
@@ -132,3 +133,4 @@ struct acm {
 #define NOT_A_MODEM                    BIT(3)
 #define NO_DATA_INTERFACE              BIT(4)
 #define IGNORE_DEVICE                  BIT(5)
+#define QUIRK_CONTROL_LINE_STATE       BIT(6)
index b84fb141e122f7f0d60d9e4ba09642c7c9e15492..a6efb4184f2b4cf0a021b457b8e1d4fcccb30d56 100644 (file)
@@ -2060,6 +2060,8 @@ int usb_alloc_streams(struct usb_interface *interface,
                return -EINVAL;
        if (dev->speed != USB_SPEED_SUPER)
                return -EINVAL;
+       if (dev->state < USB_STATE_CONFIGURED)
+               return -ENODEV;
 
        for (i = 0; i < num_eps; i++) {
                /* Streams only apply to bulk endpoints. */
index 11e80ac313244289e3a7425fb16acc6b0c0e562e..b649fef2e35d4af1e9e8a206e73c0ed799049a1b 100644 (file)
@@ -4468,9 +4468,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
        if (retval)
                goto fail;
 
-       if (hcd->usb_phy && !hdev->parent)
-               usb_phy_notify_connect(hcd->usb_phy, udev->speed);
-
        /*
         * Some superspeed devices have finished the link training process
         * and attached to a superspeed hub port, but the device descriptor
@@ -4627,8 +4624,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
 
        /* Disconnect any existing devices under this port */
        if (udev) {
-               if (hcd->usb_phy && !hdev->parent &&
-                               !(portstatus & USB_PORT_STAT_CONNECTION))
+               if (hcd->usb_phy && !hdev->parent)
                        usb_phy_notify_disconnect(hcd->usb_phy, udev->speed);
                usb_disconnect(&port_dev->child);
        }
@@ -4783,6 +4779,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                                port_dev->child = NULL;
                                spin_unlock_irq(&device_state_lock);
                                mutex_unlock(&usb_port_peer_mutex);
+                       } else {
+                               if (hcd->usb_phy && !hdev->parent)
+                                       usb_phy_notify_connect(hcd->usb_phy,
+                                                       udev->speed);
                        }
                }
 
index 5ae883dc21f50f1b4e47fb68fa3255fea1940aa1..96fafed92b76b0972401a13b4eb67a5b1dfdddb1 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Microsoft Wireless Laser Mouse 6000 Receiver */
+       { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -97,6 +100,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04f3, 0x0089), .driver_info =
                        USB_QUIRK_DEVICE_QUALIFIER },
 
+       { USB_DEVICE(0x04f3, 0x009b), .driver_info =
+                       USB_QUIRK_DEVICE_QUALIFIER },
+
+       { USB_DEVICE(0x04f3, 0x016f), .driver_info =
+                       USB_QUIRK_DEVICE_QUALIFIER },
+
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
index eee87098bb8b947458383ab3cc425458a598dae9..8b5c079c7b7dde3fb1eb0e9739e117a835c23176 100644 (file)
@@ -2327,7 +2327,7 @@ irq_retry:
 
                u32 usb_status = readl(hsotg->regs + GOTGCTL);
 
-               dev_info(hsotg->dev, "%s: USBRst\n", __func__);
+               dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
                dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
                        readl(hsotg->regs + GNPTXSTS));
 
index 711b23019d541f1fcc589e93a22d80ef677ec207..df38e7ef49761ce87f3532c37c556a1399ab8199 100644 (file)
@@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 
        trb = dwc->ep0_trb;
 
+       r = next_request(&ep0->request_list);
+       if (!r)
+               return;
+
        status = DWC3_TRB_SIZE_TRBSTS(trb->size);
        if (status == DWC3_TRBSTS_SETUP_PENDING) {
                dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                return;
        }
 
-       r = next_request(&ep0->request_list);
-       if (!r)
-               return;
-
        ur = &r->request;
 
        length = trb->size & DWC3_TRB_SIZE_MASK;
index a8a30b1d416761cf004bb760d14125f1a1ffa5b9..a3ca1375dd522bea2998ab4e44f5ab662217be69 100644 (file)
@@ -234,7 +234,7 @@ config USB_EHCI_SH
 
 config USB_EHCI_EXYNOS
        tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
-       depends on PLAT_S5P || ARCH_EXYNOS
+       depends on ARCH_S5PV210 || ARCH_EXYNOS
        help
        Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
 
@@ -550,7 +550,7 @@ config USB_OHCI_SH
 
 config USB_OHCI_EXYNOS
        tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
-       depends on PLAT_S5P || ARCH_EXYNOS
+       depends on ARCH_S5PV210 || ARCH_EXYNOS
        help
         Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
 
index d0d8fadf706610ca6483cd1cea19583f6fffbf1f..1db0626c8bf415289fd7e8f0875e15e4aeff404e 100644 (file)
@@ -607,7 +607,7 @@ found:
        wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
        if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
                dev_warn(dev, "Wire Adapter v%d.%d newer than grokked v1.0\n",
-                        le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8,
+                        (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
                         le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
        result = 0;
 error:
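
The hunk above fixes a classic C operator-precedence bug: '>>' binds more
tightly than '&', so "x & 0xff00 >> 8" parses as "x & (0xff00 >> 8)", i.e.
"x & 0xff", which made the warning print the minor version in both fields.
A minimal standalone demonstration (the sample value is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bcd = 0x0110;	/* BCD-coded version 1.10, example value */

	/* Buggy: parses as bcd & (0xff00 >> 8), i.e. bcd & 0xff */
	printf("buggy:   %d.%d\n", bcd & 0xff00 >> 8, bcd & 0x00ff);

	/* Fixed: mask first, then shift */
	printf("correct: %d.%d\n", (bcd & 0xff00) >> 8, bcd & 0x00ff);
	return 0;
}

With 0x0110 this prints "buggy: 16.16" versus "correct: 1.16", which is
what the dev_warn() above intends.
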
index 696160d48ae8521651f4f313ee9998288c245ab4..388cfd83b6b667a8e40dffc6c61d9257839f2f81 100644 (file)
@@ -22,7 +22,6 @@
 
 
 #include <linux/slab.h>
-#include <linux/device.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup
-                               && device_may_wakeup(hcd->self.controller)) {
-
+               if (hcd->self.root_hub->do_remote_wakeup) {
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
index 280dde93abe528ba61e98fa4790c1fada555794f..142b601f95636fdff622bca8c4fb1a9aef87093b 100644 (file)
@@ -127,20 +127,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_AVOID_BEI;
        }
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-           (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
-               /* Workaround for occasional spurious wakeups from S5 (or
-                * any other sleep) on Haswell machines with LPT and LPT-LP
-                * with the new Intel BIOS
-                */
-               /* Limit the quirk to only known vendors, as this triggers
-                * yet another BIOS bug on some other machines
-                * https://bugzilla.kernel.org/show_bug.cgi?id=66171
-                */
-               if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
-                       xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
-       }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
@@ -162,6 +148,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                        pdev->device == 0x3432)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
 
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+                       pdev->device == 0x1042)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
+
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Resetting on resume");
@@ -291,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pdev->no_d3cold = true;
 
-       return xhci_suspend(xhci);
+       return xhci_suspend(xhci, do_wakeup);
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
index 3d78b0cd674b4cd07485dfe493b3cd67d0253fdf..646300cbe5f75d34fabf3fd4d13d1100c52e97c0 100644 (file)
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       return xhci_suspend(xhci);
+       /*
+        * xhci_suspend() needs `do_wakeup` to know whether the host is
+        * allowed to do wakeup during suspend. Since xhci_plat_suspend is
+        * currently only designed for system suspend, device_may_wakeup()
+        * is enough to determine whether the host is allowed to do wakeup.
+        * Revisit this when xhci_plat_suspend enlarges its scope, e.g. to
+        * also cover runtime suspend.
+        */
+       return xhci_suspend(xhci, device_may_wakeup(dev));
 }
 
 static int xhci_plat_resume(struct device *dev)
index bc6fcbc16f61ec820ba93d5fb6700cfcbd0ae2a2..06433aec81d71511f0583a1099d42d9977f8da3b 100644 (file)
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
-               /* Clear our internal halted state and restart the ring(s) */
+               /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-               ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 }
 
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                ep->stopped_td = td;
                return 0;
        } else {
-               if (trb_comp_code == COMP_STALL) {
-                       /* The transfer is completed from the driver's
-                        * perspective, but we need to issue a set dequeue
-                        * command for this stalled endpoint to move the dequeue
-                        * pointer past the TD.  We can't do that here because
-                        * the halt condition must be cleared first.  Let the
-                        * USB class driver clear the stall later.
-                        */
-                       ep->stopped_td = td;
-                       ep->stopped_stream = ep_ring->stream_id;
-               } else if (xhci_requires_manual_halt_cleanup(xhci,
-                                       ep_ctx, trb_comp_code)) {
-                       /* Other types of errors halt the endpoint, but the
-                        * class driver doesn't call usb_reset_endpoint() unless
-                        * the error is -EPIPE.  Clear the halted status in the
-                        * xHCI hardware manually.
+               if (trb_comp_code == COMP_STALL ||
+                   xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+                                                     trb_comp_code)) {
+                       /* Issue a reset endpoint command to clear the host side
+                        * halt, followed by a set dequeue command to move the
+                        * dequeue pointer past the TD.
+                        * The class driver clears the device side halt later.
                         */
                        xhci_cleanup_halted_endpoint(xhci,
                                        slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                else
                        td->urb->actual_length = 0;
 
-               xhci_cleanup_halted_endpoint(xhci,
-                       slot_id, ep_index, 0, td, event_trb);
-               return finish_td(xhci, td, event_trb, event, ep, status, true);
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
        }
        /*
         * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
                if (ret) {
                        urb = td->urb;
                        urb_priv = urb->hcpriv;
-                       /* Leave the TD around for the reset endpoint function
-                        * to use(but only if it's not a control endpoint,
-                        * since we already queued the Set TR dequeue pointer
-                        * command for stalled control endpoints).
-                        */
-                       if (usb_endpoint_xfer_control(&urb->ep->desc) ||
-                               (trb_comp_code != COMP_STALL &&
-                                       trb_comp_code != COMP_BABBLE))
-                               xhci_urb_free_priv(xhci, urb_priv);
-                       else
-                               kfree(urb_priv);
+
+                       xhci_urb_free_priv(xhci, urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
index 2a5d45b4cb15ef30d82294de6c5d8e015449a383..033b46c470bdff8120b1e903ee3debbb9b998218 100644 (file)
@@ -35,6 +35,8 @@
 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
 
+#define        PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
+
 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
 static int link_quirk;
 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        xhci_set_cmd_ring_deq(xhci);
 }
 
+static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+{
+       int port_index;
+       __le32 __iomem **port_array;
+       unsigned long flags;
+       u32 t1, t2;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       /* disable usb3 port wake bits */
+       port_index = xhci->num_usb3_ports;
+       port_array = xhci->usb3_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       /* disable usb2 port wake bits */
+       port_index = xhci->num_usb2_ports;
+       port_array = xhci->usb2_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * Stop HC (not bus-specific)
  *
  * This is called when the machine transition into S3/S4 mode.
  *
  */
-int xhci_suspend(struct xhci_hcd *xhci)
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 {
        int                     rc = 0;
        unsigned int            delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;
 
+       /* Clear root port wake on bits if wakeup not allowed. */
+       if (!do_wakeup)
+               xhci_disable_port_wake_on_bits(xhci);
+
        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
        }
 }
 
-/* Deal with stalled endpoints.  The core should have sent the control message
- * to clear the halt condition.  However, we need to make the xHCI hardware
- * reset its sequence number, since a device will expect a sequence number of
- * zero after the halt condition is cleared.
+/* Called when clearing a halted device. The core should have sent the
+ * control message to clear the device-side halt condition. The host side
+ * of the halt should already have been cleared by a reset endpoint
+ * command issued when the STALL transfer event was received.
+ *
  * Context: in_interrupt
  */
+
 void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct usb_device *udev;
-       unsigned int ep_index;
-       unsigned long flags;
-       int ret;
-       struct xhci_virt_ep *virt_ep;
-       struct xhci_command *command;
 
        xhci = hcd_to_xhci(hcd);
-       udev = (struct usb_device *) ep->hcpriv;
-       /* Called with a root hub endpoint (or an endpoint that wasn't added
-        * with xhci_add_endpoint()
-        */
-       if (!ep->hcpriv)
-               return;
-       ep_index = xhci_get_endpoint_index(&ep->desc);
-       virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
-       if (!virt_ep->stopped_td) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Endpoint 0x%x not halted, refusing to reset.",
-                       ep->desc.bEndpointAddress);
-               return;
-       }
-       if (usb_endpoint_xfer_control(&ep->desc)) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                               "Control endpoint stall already handled.");
-               return;
-       }
 
-       command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-       if (!command)
-               return;
-
-       xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Queueing reset endpoint command");
-       spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
        /*
-        * Can't change the ring dequeue pointer until it's transitioned to the
-        * stopped state, which is only upon a successful reset endpoint
-        * command.  Better hope that last command worked!
+        * We might need to implement the config ep cmd in xhci 4.8.1 note:
+        * The Reset Endpoint Command may only be issued to endpoints in the
+        * Halted state. If software wishes to reset the Data Toggle or
+        * Sequence Number of an endpoint that isn't in the Halted state,
+        * then software may issue a Configure Endpoint Command with the
+        * Drop and Add bits set for the target endpoint that is in the
+        * Stopped state.
         */
-       if (!ret) {
-               xhci_cleanup_stalled_ring(xhci, udev, ep_index);
-               kfree(virt_ep->stopped_td);
-               xhci_ring_cmd_db(xhci);
-       }
-       virt_ep->stopped_td = NULL;
-       virt_ep->stopped_stream = 0;
-       spin_unlock_irqrestore(&xhci->lock, flags);
 
-       if (ret)
-               xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+       /* For now, just print a debug message to follow the situation */
+       xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
+                ep->desc.bEndpointAddress);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
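
The new xhci_disable_port_wake_on_bits() walks both root-hub port arrays
with the same read-modify-write shape. A hedged sketch of that shape in
isolation (the helper name is hypothetical); xhci_port_state_to_neutral()
masks the write-1-to-clear change bits so that writing the register back
cannot acknowledge pending port events by accident:

static void clear_port_wake_bits(__le32 __iomem *reg)
{
	u32 t1, t2;

	t1 = readl(reg);
	t1 = xhci_port_state_to_neutral(t1);	/* drop RW1C status bits */
	t2 = t1 & ~PORT_WAKE_BITS;		/* WKOC, WKDISC, WKCONN off */
	if (t1 != t2)				/* skip a pointless MMIO write */
		writel(t2, reg);
}
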
index df76d642e7190bd04854a10024c06714dd7e48d0..d745715a1e2f53648b1e1c2b1288f9c963bb42be 100644 (file)
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
 
 #ifdef CONFIG_PM
-int xhci_suspend(struct xhci_hcd *xhci);
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
 #else
 #define        xhci_suspend    NULL
index cfd009dc401826cc8e052325249c791bf8a959d6..6c4eb3cf5efd599653641e5d96d20b05610a6ed5 100644 (file)
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
index 0dad8ce5a60946431e41f38683971dfc8f1dab13..1ebb351b9e9a59c9dbd90ffda56cae1769de764e 100644 (file)
@@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
index 6786b705ccf606ca47cb471d76b25c4b2981bf10..e52409c9be999f817cbdb627f4be8ec6f127cbe6 100644 (file)
 #define BAYER_CONTOUR_CABLE_PID        0x6001
 
 /*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
+ * Matrix Orbital Intelligent USB displays.
+ * http://www.matrixorbital.com
  */
 #define MTXORB_VID                     0x1B3D
 #define MTXORB_FTDI_RANGE_0100_PID     0x0100
 #define MTXORB_FTDI_RANGE_01FD_PID     0x01FD
 #define MTXORB_FTDI_RANGE_01FE_PID     0x01FE
 #define MTXORB_FTDI_RANGE_01FF_PID     0x01FF
-
-
+#define MTXORB_FTDI_RANGE_4701_PID     0x4701
+#define MTXORB_FTDI_RANGE_9300_PID     0x9300
+#define MTXORB_FTDI_RANGE_9301_PID     0x9301
+#define MTXORB_FTDI_RANGE_9302_PID     0x9302
+#define MTXORB_FTDI_RANGE_9303_PID     0x9303
+#define MTXORB_FTDI_RANGE_9304_PID     0x9304
+#define MTXORB_FTDI_RANGE_9305_PID     0x9305
+#define MTXORB_FTDI_RANGE_9306_PID     0x9306
+#define MTXORB_FTDI_RANGE_9307_PID     0x9307
+#define MTXORB_FTDI_RANGE_9308_PID     0x9308
+#define MTXORB_FTDI_RANGE_9309_PID     0x9309
+#define MTXORB_FTDI_RANGE_930A_PID     0x930A
+#define MTXORB_FTDI_RANGE_930B_PID     0x930B
+#define MTXORB_FTDI_RANGE_930C_PID     0x930C
+#define MTXORB_FTDI_RANGE_930D_PID     0x930D
+#define MTXORB_FTDI_RANGE_930E_PID     0x930E
+#define MTXORB_FTDI_RANGE_930F_PID     0x930F
+#define MTXORB_FTDI_RANGE_9310_PID     0x9310
+#define MTXORB_FTDI_RANGE_9311_PID     0x9311
+#define MTXORB_FTDI_RANGE_9312_PID     0x9312
+#define MTXORB_FTDI_RANGE_9313_PID     0x9313
+#define MTXORB_FTDI_RANGE_9314_PID     0x9314
+#define MTXORB_FTDI_RANGE_9315_PID     0x9315
+#define MTXORB_FTDI_RANGE_9316_PID     0x9316
+#define MTXORB_FTDI_RANGE_9317_PID     0x9317
+#define MTXORB_FTDI_RANGE_9318_PID     0x9318
+#define MTXORB_FTDI_RANGE_9319_PID     0x9319
+#define MTXORB_FTDI_RANGE_931A_PID     0x931A
+#define MTXORB_FTDI_RANGE_931B_PID     0x931B
+#define MTXORB_FTDI_RANGE_931C_PID     0x931C
+#define MTXORB_FTDI_RANGE_931D_PID     0x931D
+#define MTXORB_FTDI_RANGE_931E_PID     0x931E
+#define MTXORB_FTDI_RANGE_931F_PID     0x931F
 
 /*
  * The Mobility Lab (TML)
index 93cb7cebda62760bcaae46f3710e7477ff59a507..077c714f1285171ee3b9e4c418e0df42f60cd42c 100644 (file)
@@ -311,24 +311,30 @@ static void       usa26_indat_callback(struct urb *urb)
                if ((data[0] & 0x80) == 0) {
                        /* no errors on individual bytes, only
                           possible overrun err */
-                       if (data[0] & RXERROR_OVERRUN)
-                               err = TTY_OVERRUN;
-                       else
-                               err = 0;
+                       if (data[0] & RXERROR_OVERRUN) {
+                               tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                       }
                        for (i = 1; i < urb->actual_length ; ++i)
-                               tty_insert_flip_char(&port->port, data[i], err);
+                               tty_insert_flip_char(&port->port, data[i],
+                                                               TTY_NORMAL);
                } else {
                        /* some bytes had errors, every byte has status */
                        dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -649,14 +655,19 @@ static void       usa49_indat_callback(struct urb *urb)
                } else {
                        /* some bytes had errors, every byte has status */
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
                         */
                        for (x = 0; x + 1 < len &&
                                    i + 1 < urb->actual_length; x += 2) {
-                               int stat = data[i], flag = 0;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
 
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                     flag);
                                i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
                        if ((data[0] & 0x80) == 0) {
                                /* no errors on individual bytes, only
                                   possible overrun err*/
-                               if (data[0] & RXERROR_OVERRUN)
-                                       err = TTY_OVERRUN;
-                               else
-                                       err = 0;
+                               if (data[0] & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                for (i = 1; i < urb->actual_length ; ++i)
                                        tty_insert_flip_char(&port->port,
-                                                       data[i], err);
+                                                       data[i], TTY_NORMAL);
                        }  else {
                        /* some bytes had errors, every byte has status */
                                dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                                for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                                       int stat = data[i], flag = 0;
-                                       if (stat & RXERROR_OVERRUN)
-                                               flag |= TTY_OVERRUN;
-                                       if (stat & RXERROR_FRAMING)
-                                               flag |= TTY_FRAME;
-                                       if (stat & RXERROR_PARITY)
-                                               flag |= TTY_PARITY;
+                                       int stat = data[i];
+                                       int flag = TTY_NORMAL;
+
+                                       if (stat & RXERROR_OVERRUN) {
+                                               tty_insert_flip_char(
+                                                               &port->port, 0,
+                                                               TTY_OVERRUN);
+                                       }
                                        /* XXX should handle break (0x10) */
+                                       if (stat & RXERROR_PARITY)
+                                               flag = TTY_PARITY;
+                                       else if (stat & RXERROR_FRAMING)
+                                               flag = TTY_FRAME;
+
                                        tty_insert_flip_char(&port->port,
                                                        data[i+1], flag);
                                }
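
The four hunks in this driver make the same two corrections: TTY_* flags
are enumerated values rather than a bitmask, so OR-ing them together
produced meaningless flags, and an overrun describes lost data rather than
the byte in hand, so it is now reported out-of-band as a NUL byte flagged
TTY_OVERRUN. A hedged sketch of the resulting per-byte pattern (the helper
name is hypothetical; RXERROR_* are the driver constants used above):

static void push_rx_byte(struct tty_port *port, u8 stat, u8 ch)
{
	char flag = TTY_NORMAL;

	if (stat & RXERROR_OVERRUN)
		tty_insert_flip_char(port, 0, TTY_OVERRUN);

	/* exactly one flag per byte, parity taking precedence */
	if (stat & RXERROR_PARITY)
		flag = TTY_PARITY;
	else if (stat & RXERROR_FRAMING)
		flag = TTY_FRAME;

	tty_insert_flip_char(port, ch, flag);
}
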
index 3d2bd65df0fc71e05ca330a136b8c51280f9ab3d..02c420af251e27bb2cb3c4475343b8a869ae1ef5 100644 (file)
@@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
                        port->interrupt_out_urb->transfer_buffer_length = length;
 
                        priv->cur_pos = priv->cur_pos + length;
-                       result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
+                       result = usb_submit_urb(port->interrupt_out_urb,
+                                       GFP_ATOMIC);
                        dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
                        todo = priv->filled - priv->cur_pos;
 
@@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
                if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
                        priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
                        result = usb_submit_urb(port->interrupt_in_urb,
-                                                               GFP_NOIO);
+                                       GFP_ATOMIC);
                        dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
                }
        }
index 4856fb7e637e70ffcc5f4da4cdb6536588b3aa95..4b7bfb394a32aba025e8e07ad3384759ea1723d8 100644 (file)
@@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        /* The connected devices do not have a bulk write endpoint,
         * to transmit data to the barcode device the control endpoint is used */
-       dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+       dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
        if (!dr) {
                count = -ENOMEM;
                goto error_no_dr;
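
This hunk and the kobil_sct one above apply the same rule: these write
paths may be entered from atomic context, where GFP_NOIO (which may sleep)
is unsafe, while GFP_ATOMIC never sleeps but can fail under memory
pressure, so the return values still need checking. A hedged sketch of the
idiom (the function name is hypothetical):

static int resubmit_atomic(struct usb_serial_port *port)
{
	int ret;

	/* GFP_ATOMIC: safe under spinlocks and in completion handlers */
	ret = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
	if (ret)
		dev_dbg(&port->dev, "urb submission failed: %d\n", ret);
	return ret;
}
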
index a7fe664b6b7d164e628c5b466e548efe7e10e7ec..70a098de429fc39934ef8808d3e6c5011f063352 100644 (file)
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
                        if (*tty_flag == TTY_NORMAL)
                                *tty_flag = TTY_FRAME;
                }
-               if (lsr & UART_LSR_OE){
+               if (lsr & UART_LSR_OE) {
                        port->icount.overrun++;
-                       if (*tty_flag == TTY_NORMAL)
-                               *tty_flag = TTY_OVERRUN;
+                       tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
                }
        }
 
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
        if ((len >= 4) &&
            (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
            ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-               if (packet[2] == 0x00) {
+               if (packet[2] == 0x00)
                        ssu100_update_lsr(port, packet[3], &flag);
-                       if (flag == TTY_OVERRUN)
-                               tty_insert_flip_char(&port->port, 0,
-                                               TTY_OVERRUN);
-               }
                if (packet[2] == 0x01)
                        ssu100_update_msr(port, packet[3]);
 
index 4bc2fc98636ed27f65ec8425c6a3e583e580174e..73f125e0cb587015eb282dfef0f5eaadea273ed5 100644 (file)
@@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us)
        us->iobuf[0] = 0x1;
        result = usb_stor_control_msg(us, us->send_ctrl_pipe,
                        0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
-                       0x01, 0x0, us->iobuf, 0x1, USB_CTRL_SET_TIMEOUT);
+                       0x01, 0x0, us->iobuf, 0x1, 5 * HZ);
        usb_stor_dbg(us, "-- result is %d\n", result);
 
        return 0;
@@ -100,7 +100,7 @@ int usb_stor_huawei_e220_init(struct us_data *us)
        result = usb_stor_control_msg(us, us->send_ctrl_pipe,
                                      USB_REQ_SET_FEATURE,
                                      USB_TYPE_STANDARD | USB_RECIP_DEVICE,
-                                     0x01, 0x0, NULL, 0x0, 1000);
+                                     0x01, 0x0, NULL, 0x0, 1 * HZ);
        usb_stor_dbg(us, "Huawei mode set result is %d\n", result);
        return 0;
 }
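
Both timeout fixes correct a units mismatch: usb_stor_control_msg() takes
its timeout in jiffies, while USB_CTRL_SET_TIMEOUT (5000) and the literal
1000 are millisecond values, so the effective timeout silently scaled with
the kernel's HZ setting. A hedged illustration of the two safe spellings:

#include <linux/jiffies.h>

/* Both expressions denote five seconds regardless of CONFIG_HZ; the
 * bare millisecond literal 5000 would not. */
unsigned long timeout_a = 5 * HZ;
unsigned long timeout_b = msecs_to_jiffies(5000);
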
index 8591d89a38e666b3043932c7da08a8fc4d27e350..27e4a580d2ed2fddb97e543ce5aafbd4f6280032 100644 (file)
@@ -626,6 +626,7 @@ static int config_autodelink_after_power_on(struct us_data *us)
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int config_autodelink_before_power_down(struct us_data *us)
 {
        struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
@@ -716,6 +717,7 @@ static void fw5895_init(struct us_data *us)
                }
        }
 }
+#endif
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static void fw5895_set_mmc_wp(struct us_data *us)
index 22c7d4360fa222722369b6861da34bafdd4b5b96..b1d815eb6d0bb34d8edeff31581eafd1ad983a0c 100644 (file)
@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
                 */
                if (result == USB_STOR_XFER_LONG)
                        fake_sense = 1;
+
+               /*
+                * Sometimes a device will mistakenly skip the data phase
+                * and go directly to the status phase without sending a
+                * zero-length packet.  If we get a 13-byte response here,
+                * check whether it really is a CSW.
+                */
+               if (result == USB_STOR_XFER_SHORT &&
+                               srb->sc_data_direction == DMA_FROM_DEVICE &&
+                               transfer_length - scsi_get_resid(srb) ==
+                                       US_BULK_CS_WRAP_LEN) {
+                       struct scatterlist *sg = NULL;
+                       unsigned int offset = 0;
+
+                       if (usb_stor_access_xfer_buf((unsigned char *) bcs,
+                                       US_BULK_CS_WRAP_LEN, srb, &sg,
+                                       &offset, FROM_XFER_BUF) ==
+                                               US_BULK_CS_WRAP_LEN &&
+                                       bcs->Signature ==
+                                               cpu_to_le32(US_BULK_CS_SIGN)) {
+                               usb_stor_dbg(us, "Device skipped data phase\n");
+                               scsi_set_resid(srb, transfer_length);
+                               goto skipped_data_phase;
+                       }
+               }
        }
 
        /* See flow chart on pg 15 of the Bulk Only Transport spec for
@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
        if (result != USB_STOR_XFER_GOOD)
                return USB_STOR_TRANSPORT_ERROR;
 
+ skipped_data_phase:
        /* check bulk status */
        residue = le32_to_cpu(bcs->Residue);
        usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
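
The new block handles devices that mistakenly answer a data-IN stage with
the status stage itself: if a short transfer left exactly 13 bytes
(US_BULK_CS_WRAP_LEN) carrying the CSW signature, the transfer is treated
as a skipped data phase. A hedged standalone sketch of the recognition
test (this simplified version assumes a little-endian host; the kernel
code compares against cpu_to_le32(US_BULK_CS_SIGN) instead):

#include <stdint.h>
#include <string.h>

#define US_BULK_CS_WRAP_LEN	13		/* a CSW is always 13 bytes */
#define US_BULK_CS_SIGN		0x53425355	/* "USBS", little-endian */

static int looks_like_csw(const uint8_t *buf, size_t len)
{
	uint32_t sig;

	if (len != US_BULK_CS_WRAP_LEN)
		return 0;
	memcpy(&sig, buf, sizeof(sig));		/* first 4 bytes: signature */
	return sig == US_BULK_CS_SIGN;
}
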
index 8511b54a65d9665e51f1a0cc808b4535b611ba97..18a283d6de1c8bd18663b57bbf7499510c49fa2d 100644 (file)
@@ -54,6 +54,20 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
+               "Seagate",
+               "Expansion Desk",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
+/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */
+UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
+               "Seagate",
+               "Backup Plus",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
                "Seagate",
@@ -61,6 +75,13 @@ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
+UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
+               "Seagate",
+               "Backup+ BK",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                "JMicron",
@@ -75,3 +96,17 @@ UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
                "ASM1051",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_UAS),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+               "VIA",
+               "VL711",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+               "Hitachi",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
index 69906cacd04fdc8b3d5236dc030e8bcf749c541f..a17f11850669b461130610692d558dd7e499258b 100644 (file)
@@ -1312,6 +1312,7 @@ static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
@@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                                ret = -EEXIST;
                                goto out;
                        }
+                       /*
+                        * In order to ensure individual vhost-scsi configfs
+                        * groups cannot be removed while in use by vhost ioctl,
+                        * go ahead and take an explicit se_tpg->tpg_group.cg_item
+                        * dependency now.
+                        */
+                       se_tpg = &tpg->se_tpg;
+                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                                  &se_tpg->tpg_group.cg_item);
+                       if (ret) {
+                               pr_warn("configfs_depend_item() failed: %d\n", ret);
+                               kfree(vs_tpg);
+                               mutex_unlock(&tpg->tv_tpg_mutex);
+                               goto out;
+                       }
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
@@ -1401,6 +1417,7 @@ static int
 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                          struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tpg;
        struct vhost_virtqueue *vq;
@@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tpg->tv_tpg_mutex);
+               /*
+                * Release se_tpg->tpg_group.cg_item configfs dependency now
+                * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
+                */
+               se_tpg = &tpg->se_tpg;
+               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                      &se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 8532c3e2aea7d6455c1548e1a0cba6491ae3a5da..1626dc66e7636837a340a6faaf762df74c25df54 100644 (file)
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
 static const struct s3c2410_wdt_variant drv_data_exynos7 = {
        .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
        .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
-       .mask_bit = 0,
+       .mask_bit = 23,
        .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
        .rst_stat_bit = 23,     /* A57 WDTRESET */
        .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
index 34a1b9dea6dda9824eda07022cb544db83066eed..da0bbb456d3fcf6fee384b9287566cb64c5ae04a 100644 (file)
@@ -104,7 +104,7 @@ obj-$(CONFIG_QNX6FS_FS)             += qnx6/
 obj-$(CONFIG_AUTOFS4_FS)       += autofs4/
 obj-$(CONFIG_ADFS_FS)          += adfs/
 obj-$(CONFIG_FUSE_FS)          += fuse/
-obj-$(CONFIG_OVERLAYFS_FS)     += overlayfs/
+obj-$(CONFIG_OVERLAY_FS)       += overlayfs/
 obj-$(CONFIG_UDF_FS)           += udf/
 obj-$(CONFIG_SUN_OPENPROMFS)   += openpromfs/
 obj-$(CONFIG_OMFS_FS)          += omfs/
index 84a751005f5b8ad0f0f5cd6e3c5ec67df5f7440c..14b93159ef83a140483bb31a4a6c70286d209ed8 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
 static const struct file_operations aio_ring_fops;
 static const struct address_space_operations aio_ctx_aops;
 
+/* Backing dev info for the aio fs.
+ * No dirty page accounting or writeback happens.
+ */
+static struct backing_dev_info aio_fs_backing_dev_info = {
+       .name           = "aiofs",
+       .state          = 0,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
+};
+
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
        struct qstr this = QSTR_INIT("[aio]", 5);
@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 
        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->private_data = ctx;
+       inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
        inode->i_size = PAGE_SIZE * nr_pages;
 
        path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -220,6 +230,9 @@ static int __init aio_setup(void)
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");
 
+       if (bdi_init(&aio_fs_backing_dev_info))
+               panic("Failed to init aio fs backing dev info.");
+
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
        .mmap = aio_ring_mmap,
 };
 
-static int aio_set_page_dirty(struct page *page)
-{
-       return 0;
-}
-
 #if IS_ENABLED(CONFIG_MIGRATION)
 static int aio_migratepage(struct address_space *mapping, struct page *new,
                        struct page *old, enum migrate_mode mode)
@@ -357,7 +365,7 @@ out:
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
-       .set_page_dirty = aio_set_page_dirty,
+       .set_page_dirty = __set_page_dirty_no_writeback,
 #if IS_ENABLED(CONFIG_MIGRATION)
        .migratepage    = aio_migratepage,
 #endif
@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
                pr_debug("pid(%d) page[%d]->count=%d\n",
                         current->pid, i, page_count(page));
                SetPageUptodate(page);
-               SetPageDirty(page);
                unlock_page(page);
 
                ctx->ring_pages[i] = page;
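
The aio ring pages are an in-kernel ring buffer, not file data, so they
must never reach the writeback path: the hunks above replace a private
no-op set_page_dirty with the stock __set_page_dirty_no_writeback() and
attach a backing_dev_info that opts out of dirty accounting and writeback.
A hedged sketch of the same wiring for a hypothetical pseudo-filesystem:

static struct backing_dev_info demo_bdi = {
	.name		= "demofs",
	.state		= 0,
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct address_space_operations demo_aops = {
	.set_page_dirty	= __set_page_dirty_no_writeback,
};
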
index d3220d31d3cbf0e653898d15816f5895045c06d5..dcd9be32ac579451597dcf61e97b54631a69aa68 100644 (file)
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                bytes = min(bytes, working_bytes);
                kaddr = kmap_atomic(page_out);
                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-               if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-                       memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
        return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure to zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset)
+{
+       while (pg_index < vcnt) {
+               struct page *page = bvec[pg_index].bv_page;
+               unsigned long off = bvec[pg_index].bv_offset;
+               unsigned long len = bvec[pg_index].bv_len;
+
+               if (pg_offset < off)
+                       pg_offset = off;
+               if (pg_offset < off + len) {
+                       unsigned long bytes = off + len - pg_offset;
+                       char *kaddr;
+
+                       kaddr = kmap_atomic(page);
+                       memset(kaddr + pg_offset, 0, bytes);
+                       kunmap_atomic(kaddr);
+               }
+               pg_index++;
+               pg_offset = 0;
+       }
+}
index 0c803b4fbf93dc8062e644952abb7f36ff0e8504..d181f70caae01471ca80e181818a833b41f057de 100644 (file)
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset);
 struct btrfs_compress_op {
        struct list_head *(*alloc_workspace)(void);
 
index 19bc6162fb8e899cc51bd3d777fcbddf91589aa6..150822ee0a0b9f9668f071885c4f018b1c9f6bf3 100644 (file)
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
        int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       /* lockdep really cares that we take all of these spinlocks
-        * in the right order.  If any of the locks in the path are not
-        * currently blocking, it is going to complain.  So, make really
-        * really sure by forcing the path to blocking before we clear
-        * the path blocking.
-        */
        if (held) {
                btrfs_set_lock_blocking_rw(held, held_rw);
                if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                        held_rw = BTRFS_READ_LOCK_BLOCKING;
        }
        btrfs_set_path_blocking(p);
-#endif
 
        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                }
        }
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
                btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
                                        }
                                        p->locks[level] = BTRFS_WRITE_LOCK;
                                } else {
-                                       err = btrfs_try_tree_read_lock(b);
+                                       err = btrfs_tree_read_lock_atomic(b);
                                        if (!err) {
                                                btrfs_set_path_blocking(p);
                                                btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
                        }
 
                        level = btrfs_header_level(b);
-                       err = btrfs_try_tree_read_lock(b);
+                       err = btrfs_tree_read_lock_atomic(b);
                        if (!err) {
                                btrfs_set_path_blocking(p);
                                btrfs_tree_read_lock(b);
index 783a94355efd0be2acd75d1a84b3860ec7a28740..84a2d1868271b86b1f6a771bf5cf734a8e45c3dc 100644 (file)
@@ -413,7 +413,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
        ret = 0;
 fail:
        while (ret < 0 && !list_empty(&tmplist)) {
-               sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
+               sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
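
The one-line fix above repairs a subtle misuse of list_entry(): passing
&tmplist computes a container address from the list head itself, which is
not embedded in any element, so the resulting pointer is garbage and was
then freed; tmplist.next points at a real btrfs_ordered_sum. A hedged
sketch of the corrected idiom, spelled with list_first_entry():

while (!list_empty(&tmplist)) {
	struct btrfs_ordered_sum *sums;

	/* take the first *element*, never the head itself */
	sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list);
	list_del(&sums->list);
	kfree(sums);
}
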
index 5665d2149249d1d83a260c74f21889e1d394a6c3..f8229ef1b46df098a3b04260c4f943db3e924d49 100644 (file)
@@ -127,6 +127,26 @@ again:
        atomic_inc(&eb->spinning_readers);
 }
 
+/*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+       if (atomic_read(&eb->blocking_writers))
+               return 0;
+
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers)) {
+               read_unlock(&eb->lock);
+               return 0;
+       }
+       atomic_inc(&eb->read_locks);
+       atomic_inc(&eb->spinning_readers);
+       return 1;
+}
+
 /*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
            atomic_read(&eb->blocking_readers))
                return 0;
 
-       if (!write_trylock(&eb->lock))
-               return 0;
-
+       write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
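
btrfs_tree_read_lock_atomic(), added above, takes a spinning read lock
without ever waiting on blocking writers, and the ctree.c hunks switch
their fast paths over to it. A hedged, generic restatement of its
check/lock/recheck shape (names are hypothetical): the unlocked precheck
avoids touching the rwlock at all when writers are blocking, and the
recheck under the lock closes the race with a writer that went blocking
in between.

#include <linux/spinlock.h>
#include <linux/atomic.h>

static int try_read_lock_atomic(rwlock_t *lock, atomic_t *blocking_writers)
{
	if (atomic_read(blocking_writers))	/* cheap precheck */
		return 0;
	read_lock(lock);
	if (atomic_read(blocking_writers)) {	/* recheck under the lock */
		read_unlock(lock);
		return 0;
	}
	return 1;	/* caller now holds the spinning read lock */
}
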
index b81e0e9a48941891681eef385d10d071f6cbe51b..c44a9d5f5362b0dcb1c525eca16b57aead8a58ae 100644 (file)
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
index 78285f30909edd09f19cc6eb48985d47eed3c565..617553cdb7d3b36b1b8ca6a87b28526c5a060b4d 100644 (file)
@@ -373,6 +373,8 @@ cont:
        }
 done:
        kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                goto out;
        }
 
+       /*
+        * the caller is already checking against PAGE_SIZE, but let's
+        * move this check closer to the memcpy/memset
+        */
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+       /*
+        * btrfs_get_block is doing a zero on the tail of the page too,
+        * but this will cover anything missing from the decompressed
+        * data.
+        */
+       if (bytes < destlen)
+               memset(kaddr+bytes, 0, destlen-bytes);
        kunmap_atomic(kaddr);
 out:
        return ret;
index 759fa4e2de8fec28d3f6448456e1e12cec1add17..fb22fd8d8fb8fad73cb4d2b63d4d525da3eea876 100644 (file)
@@ -299,6 +299,8 @@ done:
        zlib_inflateEnd(&workspace->strm);
        if (data_in)
                kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        int wbits = MAX_WBITS;
-       unsigned long bytes_left = destlen;
+       unsigned long bytes_left;
        unsigned long total_out = 0;
+       unsigned long pg_offset = 0;
        char *kaddr;
 
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+       bytes_left = destlen;
+
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = srclen;
        workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                unsigned long buf_start;
                unsigned long buf_offset;
                unsigned long bytes;
-               unsigned long pg_offset = 0;
 
                ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
                ret = 0;
 
        zlib_inflateEnd(&workspace->strm);
+
+       /*
+        * this should only happen if zlib returned fewer bytes than we
+        * expected.  btrfs_get_block is responsible for zeroing from the
+        * end of the inline extent (destlen) to the end of the page
+        */
+       if (pg_offset < destlen) {
+               kaddr = kmap_atomic(dest_page);
+               memset(kaddr + pg_offset, 0, destlen - pg_offset);
+               kunmap_atomic(kaddr);
+       }
        return ret;
 }
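
Taken together, the lzo and zlib hunks enforce one invariant for
single-page inline-extent decompression: destlen is clamped to PAGE_SIZE
before any memcpy/memset, and bytes the decompressor did not produce are
zeroed instead of being left as stale page contents. A hedged
helper-shaped sketch of that rule (the name is hypothetical):

static void zero_decompressed_tail(struct page *page, unsigned long filled,
				   unsigned long destlen)
{
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	if (filled >= destlen)
		return;

	kaddr = kmap_atomic(page);
	memset(kaddr + filled, 0, destlen - filled);	/* zero the tail */
	kunmap_atomic(kaddr);
}
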
 
index 659f2ea9e6f74741ecbe2a7e4c322f079352fb08..cefca661464b91a4edae048a8567e900d8560df6 100644 (file)
@@ -2638,7 +2638,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((dirty & (1 << i)) &&
-                   flush_tid == ci->i_cap_flush_tid[i])
+                   (u16)flush_tid == ci->i_cap_flush_tid[i])
                        cleaned |= 1 << i;
 
        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
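
The cast matters because ci->i_cap_flush_tid[] stores only 16-bit tids
while flush_tid is a full 64-bit value (inferred from the hunk; the cast
would otherwise be a no-op): under the usual arithmetic conversions the
stored entry widens, so once tids exceed 65535 the untruncated comparison
can never match and dirty caps would never be marked clean. A standalone
illustration with an example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t flush_tid = 0x10002;		/* the 65538th flush */
	uint16_t stored = (uint16_t)flush_tid;	/* table keeps low 16 bits */

	printf("untruncated: %d\n", flush_tid == stored);		/* 0 */
	printf("truncated:   %d\n", (uint16_t)flush_tid == stored);	/* 1 */
	return 0;
}
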
index 3ffef7f4e5cdd9d00ca454130070f4619a8707d6..5bc72b07fde22bcbb00451c7522333e2aa273323 100644 (file)
@@ -778,6 +778,7 @@ restart:
                        struct dentry *parent = lock_parent(dentry);
                        if (likely(!dentry->d_lockref.count)) {
                                __dentry_kill(dentry);
+                               dput(parent);
                                goto restart;
                        }
                        if (parent)
index 6df8d3d885e5a56374dfeb61c90a7b6d6e148e15..b8b92c2f96834baa310a20b007557f1b9e69ae91 100644 (file)
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
        }
 
        alias = d_find_alias(inode);
-       if (alias && !vfat_d_anon_disconn(alias)) {
+       /*
+        * Check "alias->d_parent == dentry->d_parent" to make sure the
+        * FS is not corrupted (especially a doubly-linked dir).
+        */
+       if (alias && alias->d_parent == dentry->d_parent &&
+           !vfat_d_anon_disconn(alias)) {
                /*
                 * This inode has non anonymous-DCACHE_DISCONNECTED
                 * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
-       dentry = d_splice_alias(inode, dentry);
-       if (dentry)
-               dentry->d_time = dentry->d_parent->d_inode->i_version;
-       return dentry;
-
+       if (!inode)
+               dentry->d_time = dir->i_version;
+       return d_splice_alias(inode, dentry);
 error:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
        return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
        /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
        d_instantiate(dentry, inode);
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
        clear_nlink(inode);
        inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
        fat_detach(inode);
+       dentry->d_time = dir->i_version;
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
        clear_nlink(inode);
        inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
        fat_detach(inode);
+       dentry->d_time = dir->i_version;
 out:
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
        /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-       dentry->d_time = dentry->d_parent->d_inode->i_version;
        d_instantiate(dentry, inode);
 
        mutex_unlock(&MSDOS_SB(sb)->s_lock);
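
All of the d_time reshuffling above serves one invariant: a negative dentry is trusted only while the parent directory's i_version is unchanged. A toy sketch of the revalidation check this enables (the struct names are hypothetical stand-ins, not the real vfat types):

#include <stdio.h>

struct toy_inode  { unsigned long long i_version; };
struct toy_dentry { unsigned long long d_time; struct toy_inode *dir; };

/* A negative dentry records the directory's i_version in d_time when it
 * is created (lookup miss) or turns negative (unlink/rmdir); it is only
 * trusted while the two still match. */
static int still_valid(const struct toy_dentry *d)
{
        return d->d_time == d->dir->i_version;
}

int main(void)
{
        struct toy_inode dir = { .i_version = 7 };
        struct toy_dentry neg = { .d_time = 7, .dir = &dir };

        printf("cached miss ok: %d\n", still_valid(&neg)); /* 1 */
        dir.i_version++;   /* a name was created in the directory */
        printf("after change:   %d\n", still_valid(&neg)); /* 0: re-lookup */
        return 0;
}
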
index fe839b9151161544cd5a4f3243918b25e995b599..d67a16f2a45df8fcce56b9ff3ec56f334d951c9c 100644 (file)
@@ -170,27 +170,6 @@ struct iso9660_options{
        s32 sbsector;
 };
 
-/*
- * Compute the hash for the isofs name corresponding to the dentry.
- */
-static int
-isofs_hash_common(struct qstr *qstr, int ms)
-{
-       const char *name;
-       int len;
-
-       len = qstr->len;
-       name = qstr->name;
-       if (ms) {
-               while (len && name[len-1] == '.')
-                       len--;
-       }
-
-       qstr->hash = full_name_hash(name, len);
-
-       return 0;
-}
-
 /*
  * Compute the hash for the isofs name corresponding to the dentry.
  */
@@ -263,6 +242,27 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
 }
 
 #ifdef CONFIG_JOLIET
+/*
+ * Compute the hash for the isofs name corresponding to the dentry.
+ */
+static int
+isofs_hash_common(struct qstr *qstr, int ms)
+{
+       const char *name;
+       int len;
+
+       len = qstr->len;
+       name = qstr->name;
+       if (ms) {
+               while (len && name[len-1] == '.')
+                       len--;
+       }
+
+       qstr->hash = full_name_hash(name, len);
+
+       return 0;
+}
+
 static int
 isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
 {
index e4dc74713a4328eda4738823f51e0c6070937e0a..1df94fabe4eba015bba7e6681e5c17638a27d3df 100644 (file)
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                                journal->j_chksum_driver = NULL;
                                return 0;
                        }
-               }
 
-               /* Precompute checksum seed for all metadata */
-               if (jbd2_journal_has_csum_v2or3(journal))
+                       /* Precompute checksum seed for all metadata */
                        journal->j_csum_seed = jbd2_chksum(journal, ~0,
                                                           sb->s_uuid,
                                                           sizeof(sb->s_uuid));
+               }
        }
 
        /* If enabling v1 checksums, downgrade superblock */
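
Moving the seed computation inside the branch ties it to a successfully loaded checksum driver. The pattern itself, seed once per filesystem and then chain every metadata checksum off that seed, can be sketched standalone (the checksum below is a trivial FNV-1a stand-in, not the crc32c the journal actually uses via jbd2_chksum):

#include <stdint.h>
#include <stdio.h>

static uint32_t chksum(uint32_t seed, const void *buf, unsigned long len)
{
        const unsigned char *p = buf;
        uint32_t h = seed;

        while (len--) {
                h ^= *p++;
                h *= 16777619u;
        }
        return h;
}

int main(void)
{
        unsigned char uuid[16] = "0123456789abcdef"; /* sb->s_uuid stand-in */
        unsigned char block[64] = "some metadata block";

        /* Once per filesystem, and only after the driver loaded: */
        uint32_t seed = chksum(~0u, uuid, sizeof(uuid));

        /* Per metadata buffer, chained off the precomputed seed: */
        uint32_t csum = chksum(seed, block, sizeof(block));

        printf("seed=%08x csum=%08x\n", seed, csum);
        return 0;
}
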
index 5228f201d3d53ed93966bc99e682077b9c78f107..4f46f7a05289ba957e219df793b68dfa2f037bb5 100644 (file)
@@ -378,7 +378,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
        loff_t offset = header->args.offset;
        size_t count = header->args.count;
        struct page **pages = header->args.pages;
-       int pg_index = pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+       int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
        unsigned int pg_len;
        struct blk_plug plug;
        int i;
index e966c023b1b74df4161461a7cad843e6b9bcfbda..acbf9ca4018ccb6c7cd3bf6d6281ae566cbe4448 100644 (file)
@@ -65,17 +65,18 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
 
        dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
 
+       mutex_lock(&nn->bl_mutex);
        bl_pipe_msg.bl_wq = &nn->bl_wq;
 
        b->simple.len += 4;     /* single volume */
        if (b->simple.len > PAGE_SIZE)
-               return -EIO;
+               goto out_unlock;
 
        memset(msg, 0, sizeof(*msg));
        msg->len = sizeof(*bl_msg) + b->simple.len;
        msg->data = kzalloc(msg->len, gfp_mask);
        if (!msg->data)
-               goto out;
+               goto out_free_data;
 
        bl_msg = msg->data;
        bl_msg->type = BL_DEVICE_MOUNT,
@@ -87,7 +88,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
        rc = rpc_queue_upcall(nn->bl_device_pipe, msg);
        if (rc < 0) {
                remove_wait_queue(&nn->bl_wq, &wq);
-               goto out;
+               goto out_free_data;
        }
 
        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -97,12 +98,14 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
        if (reply->status != BL_DEVICE_REQUEST_PROC) {
                printk(KERN_WARNING "%s failed to decode device: %d\n",
                        __func__, reply->status);
-               goto out;
+               goto out_free_data;
        }
 
        dev = MKDEV(reply->major, reply->minor);
-out:
+out_free_data:
        kfree(msg->data);
+out_unlock:
+       mutex_unlock(&nn->bl_mutex);
        return dev;
 }
 
@@ -232,6 +235,7 @@ static int nfs4blocklayout_net_init(struct net *net)
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
 
+       mutex_init(&nn->bl_mutex);
        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
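
The reworked error paths follow the usual goto-unwind idiom: take the mutex first, then exit through labels that release exactly what was acquired. A userspace sketch of the same shape (pthread mutex and calloc standing in for bl_mutex and kzalloc; the error value is a stand-in):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t bl_mutex = PTHREAD_MUTEX_INITIALIZER;

static int resolve(size_t len)
{
        void *data = NULL;
        int ret = -1;   /* stand-in for the "no device" result */

        pthread_mutex_lock(&bl_mutex);

        if (len > 4096)
                goto out_unlock;     /* nothing else acquired yet */

        data = calloc(1, len);
        if (!data)
                goto out_free_data;  /* free(NULL) is a no-op, like kfree */

        /* ... upcall and wait for the reply would happen here ... */
        ret = 0;

out_free_data:
        free(data);
out_unlock:
        pthread_mutex_unlock(&bl_mutex);
        return ret;
}

int main(void)
{
        printf("oversized: %d\n", resolve(8192)); /* -1, unlock only */
        printf("ok:        %d\n", resolve(64));   /*  0, full unwind */
        return 0;
}
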
index 5853f53db73246df670ce9daedb73e10d62d2da3..7f3f60641344437ab4f493eebf6093e39957c3be 100644 (file)
@@ -125,6 +125,8 @@ again:
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
+               if (!nfs4_valid_open_stateid(state))
+                       continue;
                if (!nfs4_stateid_match(&state->stateid, stateid))
                        continue;
                get_nfs_open_context(ctx);
@@ -193,7 +195,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
 {
        int res = 0;
 
-       res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
+       if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+               res = nfs4_proc_delegreturn(inode,
+                               delegation->cred,
+                               &delegation->stateid,
+                               issync);
        nfs_free_delegation(delegation);
        return res;
 }
@@ -380,11 +386,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
 {
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
-       int err;
+       int err = 0;
 
        if (delegation == NULL)
                return 0;
        do {
+               if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+                       break;
                err = nfs_delegation_claim_opens(inode, &delegation->stateid);
                if (!issync || err != -EAGAIN)
                        break;
@@ -605,10 +613,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
        rcu_read_unlock();
 }
 
+static void nfs_revoke_delegation(struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+       rcu_read_lock();
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (delegation != NULL) {
+               set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+               nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
+       }
+       rcu_read_unlock();
+}
+
 void nfs_remove_bad_delegation(struct inode *inode)
 {
        struct nfs_delegation *delegation;
 
+       nfs_revoke_delegation(inode);
        delegation = nfs_inode_detach_delegation(inode);
        if (delegation) {
                nfs_inode_find_state_and_recover(inode, &delegation->stateid);
index 5c1cce39297f68fb178b4a125a83001aeac4afde..e3c20a3ccc937453b678e9bb02d1a46827f11be0 100644 (file)
@@ -31,6 +31,7 @@ enum {
        NFS_DELEGATION_RETURN_IF_CLOSED,
        NFS_DELEGATION_REFERENCED,
        NFS_DELEGATION_RETURNING,
+       NFS_DELEGATION_REVOKED,
 };
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
index 06e8cfcbb67053759d9b2b17e4c68a4547b257da..6e62155abf26d43c242c6e7de7250c120a5f1567 100644 (file)
@@ -1527,6 +1527,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                case -ENOENT:
                        d_drop(dentry);
                        d_add(dentry, NULL);
+                       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                        break;
                case -EISDIR:
                case -ENOTDIR:
index 20cffc830468d993b7222a05eba469a6637d09fa..10bf07280f4ab2715845003334b73d80bde15f44 100644 (file)
@@ -266,6 +266,7 @@ static void nfs_direct_req_free(struct kref *kref)
 {
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 
+       nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
index 46fab1cb455aa4b7deb177eb0e6bb87b786e06d9..7afb52f6a25a1b412bffac5829705bd8137eafbc 100644 (file)
@@ -145,9 +145,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
-               if (state == NULL)
-                       break;
-               nfs_remove_bad_delegation(state->inode);
        case -NFS4ERR_OPENMODE:
                if (state == NULL)
                        break;
index 6388a59f2add0683d759606c1925c35546e914c1..00689a8a85e44677d9a23c092b209ab6600928e4 100644 (file)
@@ -626,7 +626,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
        struct inode *inode = dentry->d_inode;
        int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
-       int err;
+       int err = 0;
 
        trace_nfs_getattr_enter(inode);
        /* Flush out writes to the server in order to update c/mtime.  */
index ef221fb8a18301d05282aa36a3c99ba2b1288b7c..f0e06e4acbef188988547eb4d8bf66178e6d3183 100644 (file)
@@ -19,6 +19,7 @@ struct nfs_net {
        struct rpc_pipe *bl_device_pipe;
        struct bl_dev_msg bl_mount_reply;
        wait_queue_head_t bl_wq;
+       struct mutex bl_mutex;
        struct list_head nfs_client_list;
        struct list_head nfs_volume_list;
 #if IS_ENABLED(CONFIG_NFS_V4)
index 405bd95c1f588739a511fb1566ed47f1513e696f..69dc20a743f9d35a8bc4b090b96e2c727c186e54 100644 (file)
@@ -370,11 +370,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
                case -NFS4ERR_DELEG_REVOKED:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_BAD_STATEID:
-                       if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) {
-                               nfs_remove_bad_delegation(inode);
-                               exception->retry = 1;
-                               break;
-                       }
                        if (state == NULL)
                                break;
                        ret = nfs4_schedule_stateid_recovery(server, state);
@@ -1654,7 +1649,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                        nfs_inode_find_state_and_recover(state->inode,
                                        stateid);
                        nfs4_schedule_stateid_recovery(server, state);
-                       return 0;
+                       return -EAGAIN;
                case -NFS4ERR_DELAY:
                case -NFS4ERR_GRACE:
                        set_bit(NFS_DELEGATED_STATE, &state->flags);
@@ -2109,46 +2104,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
        return ret;
 }
 
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
+{
+       nfs_remove_bad_delegation(state->inode);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+}
+
+static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
+{
+       if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
+               nfs_finish_clear_delegation_stateid(state);
+}
+
+static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+       /* NFSv4.0 doesn't allow for delegation recovery on open expire */
+       nfs40_clear_delegation_stateid(state);
+       return nfs4_open_expired(sp, state);
+}
+
 #if defined(CONFIG_NFS_V4_1)
-static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
+static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 {
        struct nfs_server *server = NFS_SERVER(state->inode);
-       nfs4_stateid *stateid = &state->stateid;
+       nfs4_stateid stateid;
        struct nfs_delegation *delegation;
-       struct rpc_cred *cred = NULL;
-       int status = -NFS4ERR_BAD_STATEID;
-
-       /* If a state reset has been done, test_stateid is unneeded */
-       if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
-               return;
+       struct rpc_cred *cred;
+       int status;
 
        /* Get the delegation credential for use by test/free_stateid */
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(state->inode)->delegation);
-       if (delegation != NULL &&
-           nfs4_stateid_match(&delegation->stateid, stateid)) {
-               cred = get_rpccred(delegation->cred);
-               rcu_read_unlock();
-               status = nfs41_test_stateid(server, stateid, cred);
-               trace_nfs4_test_delegation_stateid(state, NULL, status);
-       } else
+       if (delegation == NULL) {
                rcu_read_unlock();
+               return;
+       }
+
+       nfs4_stateid_copy(&stateid, &delegation->stateid);
+       cred = get_rpccred(delegation->cred);
+       rcu_read_unlock();
+       status = nfs41_test_stateid(server, &stateid, cred);
+       trace_nfs4_test_delegation_stateid(state, NULL, status);
 
        if (status != NFS_OK) {
                /* Free the stateid unless the server explicitly
                 * informs us the stateid is unrecognized. */
                if (status != -NFS4ERR_BAD_STATEID)
-                       nfs41_free_stateid(server, stateid, cred);
-               nfs_remove_bad_delegation(state->inode);
-
-               write_seqlock(&state->seqlock);
-               nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-               write_sequnlock(&state->seqlock);
-               clear_bit(NFS_DELEGATED_STATE, &state->flags);
+                       nfs41_free_stateid(server, &stateid, cred);
+               nfs_finish_clear_delegation_stateid(state);
        }
 
-       if (cred != NULL)
-               put_rpccred(cred);
+       put_rpccred(cred);
 }
 
 /**
@@ -2192,7 +2201,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
 {
        int status;
 
-       nfs41_clear_delegation_stateid(state);
+       nfs41_check_delegation_stateid(state);
        status = nfs41_check_open_stateid(state);
        if (status != NFS_OK)
                status = nfs4_open_expired(sp, state);
@@ -2231,19 +2240,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 
        ret = _nfs4_proc_open(opendata);
-       if (ret != 0) {
-               if (ret == -ENOENT) {
-                       dentry = opendata->dentry;
-                       if (dentry->d_inode)
-                               d_delete(dentry);
-                       else if (d_unhashed(dentry))
-                               d_add(dentry, NULL);
-
-                       nfs_set_verifier(dentry,
-                                        nfs_save_change_attribute(opendata->dir->d_inode));
-               }
+       if (ret != 0)
                goto out;
-       }
 
        state = nfs4_opendata_to_nfs4_state(opendata);
        ret = PTR_ERR(state);
@@ -4841,9 +4839,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                case -NFS4ERR_DELEG_REVOKED:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_BAD_STATEID:
-                       if (state == NULL)
-                               break;
-                       nfs_remove_bad_delegation(state->inode);
                case -NFS4ERR_OPENMODE:
                        if (state == NULL)
                                break;
@@ -8341,7 +8336,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
        .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
        .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
-       .recover_open   = nfs4_open_expired,
+       .recover_open   = nfs40_open_expired,
        .recover_lock   = nfs4_lock_expired,
        .establish_clid = nfs4_init_clientid,
 };
@@ -8408,8 +8403,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
                | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
-               | NFS_CAP_ATOMIC_OPEN_V1
-               | NFS_CAP_SEEK,
+               | NFS_CAP_ATOMIC_OPEN_V1,
        .init_client = nfs41_init_client,
        .shutdown_client = nfs41_shutdown_client,
        .match_stateid = nfs41_match_stateid,
@@ -8431,7 +8425,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
                | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
-               | NFS_CAP_ATOMIC_OPEN_V1,
+               | NFS_CAP_ATOMIC_OPEN_V1
+               | NFS_CAP_SEEK,
        .init_client = nfs41_init_client,
        .shutdown_client = nfs41_shutdown_client,
        .match_stateid = nfs41_match_stateid,
index 12493846a2d3ccc68323f11e34a396bbb487fe22..f83b02dc9166d1aaf48b76f816f96a1b908bcec1 100644 (file)
@@ -715,8 +715,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 
        if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
                nfs_release_request(req);
-       else
-               WARN_ON_ONCE(1);
 }
 
 static void
index ed2b1151b171f275b6a5ebeea1946860876fb391..7cbdf1b2e4abd7286b6033c66a9d999e735737c9 100644 (file)
@@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
-               dprintk("%s slot is busy\n", __func__);
-               return false;
+               /* Race breaker */
+               if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+                       dprintk("%s slot is busy\n", __func__);
+                       return false;
+               }
+               rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
        }
        return true;
 }
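
The added re-test is the classic "race breaker": between the first failed test_and_set_bit() and the enqueue onto cl_cb_waitq, the slot holder may release the slot and issue its wakeup, which would then be lost. A C11 sketch of the same test, queue, re-test shape (atomic_flag in place of the bit op; the sleep/wake calls are only indicated in comments):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag slot_busy = ATOMIC_FLAG_INIT;

static bool get_slot(void)
{
        if (atomic_flag_test_and_set(&slot_busy)) {
                /* rpc_sleep_on(&clp->cl_cb_waitq, ...): queue ourselves */

                /* Race breaker: the holder may have dropped the slot
                 * (and woken the queue) before we were queued. */
                if (atomic_flag_test_and_set(&slot_busy))
                        return false;   /* genuinely busy, stay asleep */

                /* We took the slot after all; undo the sleep:
                 * rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); */
        }
        return true;
}

int main(void)
{
        printf("first:  %d\n", get_slot());  /* 1: slot acquired */
        printf("second: %d\n", get_slot());  /* 0: slot busy     */
        return 0;
}
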
index 747f3b95bd11118aa477153fa97594127e2fbc1c..33a46a8dfaf73aaa65ecec7b4603b86ea4e49d83 100644 (file)
@@ -335,12 +335,15 @@ void              nfsd_lockd_shutdown(void);
        (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
-       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
+#define NFSD4_2_SECURITY_ATTRS         FATTR4_WORD2_SECURITY_LABEL
 #else
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
+#define NFSD4_2_SECURITY_ATTRS         0
 #endif
 
+#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
+       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
+       NFSD4_2_SECURITY_ATTRS)
+
 static inline u32 nfsd_suppattrs0(u32 minorversion)
 {
        return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
index 9d3e9c50066aaf5856350cf3bc85576a79bab900..89326acd45615e50cc60c09cdd3531137b813138 100644 (file)
@@ -229,8 +229,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
                                              &fsnotify_mark_srcu);
        }
 
+       /*
+        * We need to merge inode & vfsmount mark lists so that inode mark
+        * ignore masks are properly reflected for mount mark notifications.
+        * That's why this traversal is so complicated...
+        */
        while (inode_node || vfsmount_node) {
-               inode_group = vfsmount_group = NULL;
+               inode_group = NULL;
+               inode_mark = NULL;
+               vfsmount_group = NULL;
+               vfsmount_mark = NULL;
 
                if (inode_node) {
                        inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
@@ -244,21 +252,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
                        vfsmount_group = vfsmount_mark->group;
                }
 
-               if (inode_group > vfsmount_group) {
-                       /* handle inode */
-                       ret = send_to_group(to_tell, inode_mark, NULL, mask,
-                                           data, data_is, cookie, file_name);
-                       /* we didn't use the vfsmount_mark */
-                       vfsmount_group = NULL;
-               } else if (vfsmount_group > inode_group) {
-                       ret = send_to_group(to_tell, NULL, vfsmount_mark, mask,
-                                           data, data_is, cookie, file_name);
-                       inode_group = NULL;
-               } else {
-                       ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
-                                           mask, data, data_is, cookie,
-                                           file_name);
+               if (inode_group && vfsmount_group) {
+                       int cmp = fsnotify_compare_groups(inode_group,
+                                                         vfsmount_group);
+                       if (cmp > 0) {
+                               inode_group = NULL;
+                               inode_mark = NULL;
+                       } else if (cmp < 0) {
+                               vfsmount_group = NULL;
+                               vfsmount_mark = NULL;
+                       }
                }
+               ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
+                                   data, data_is, cookie, file_name);
 
                if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
                        goto out;
index 9c0898c4cfe1ce771a8a0832adda51c80225ce59..3b68b0ae0a97cb6beb6b642f3098de5c8aaec4d0 100644 (file)
@@ -12,6 +12,10 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group);
 /* protects reads of inode and vfsmount marks list */
 extern struct srcu_struct fsnotify_mark_srcu;
 
+/* compare two groups for sorting of marks lists */
+extern int fsnotify_compare_groups(struct fsnotify_group *a,
+                                  struct fsnotify_group *b);
+
 extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
                                                __u32 mask);
 /* add a mark to an inode */
index e8497144b32342437377748f26a616168455339a..dfbf5447eea4cea8fdf664ff5b0232f8a61e68b0 100644 (file)
@@ -194,6 +194,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 {
        struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
+       int cmp;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
 
@@ -219,11 +220,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
                        goto out;
                }
 
-               if (mark->group->priority < lmark->group->priority)
-                       continue;
-
-               if ((mark->group->priority == lmark->group->priority) &&
-                   (mark->group < lmark->group))
+               cmp = fsnotify_compare_groups(lmark->group, mark->group);
+               if (cmp < 0)
                        continue;
 
                hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
index d90deaa08e78f6e82cde921f5914000bc9ca4a97..34c38fabf514f1a892e10e2e24ff4a3d252ec000 100644 (file)
@@ -209,6 +209,42 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
        mark->ignored_mask = mask;
 }
 
+/*
+ * Sorting function for lists of fsnotify marks.
+ *
+ * Fanotify supports different notification classes (reflected as priority of
+ * notification group). Events shall be passed to notification groups in
+ * decreasing priority order. To achieve this marks in notification lists for
+ * inodes and vfsmounts are sorted so that priorities of corresponding groups
+ * are descending.
+ *
+ * Furthermore correct handling of the ignore mask requires processing inode
+ * and vfsmount marks of each group together. Using the group address as a
+ * further sort criterion provides a unique sorting order and thus we can
+ * merge inode and vfsmount lists of marks in linear time and find groups
+ * present in both lists.
+ *
+ * A return value of 1 signifies that b has priority over a.
+ * A return value of 0 signifies that the two marks have to be handled together.
+ * A return value of -1 signifies that a has priority over b.
+ */
+int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
+{
+       if (a == b)
+               return 0;
+       if (!a)
+               return 1;
+       if (!b)
+               return -1;
+       if (a->priority < b->priority)
+               return 1;
+       if (a->priority > b->priority)
+               return -1;
+       if (a < b)
+               return 1;
+       return -1;
+}
+
 /*
  * Attach an initialized mark to a given group and fs object.
  * These marks may be used for the fsnotify backend to determine which
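
fsnotify_compare_groups() defines a strict total order, higher priority first, group address as tie breaker, NULL last, which is what lets the inode and vfsmount mark lists be merged in one linear pass. A standalone copy over a toy struct:

#include <stdio.h>

struct group { int priority; };

/* Same ordering as fsnotify_compare_groups(): 1 means b goes first,
 * -1 means a goes first, 0 means handle the two marks together. */
static int cmp_groups(const struct group *a, const struct group *b)
{
        if (a == b)
                return 0;
        if (!a)
                return 1;
        if (!b)
                return -1;
        if (a->priority < b->priority)
                return 1;
        if (a->priority > b->priority)
                return -1;
        return a < b ? 1 : -1;
}

int main(void)
{
        struct group hi = { .priority = 2 }, lo = { .priority = 0 };

        printf("%d\n", cmp_groups(&hi, &lo)); /* -1: hi delivered first */
        printf("%d\n", cmp_groups(&hi, &hi)); /*  0: one group's inode and
                                                  vfsmount marks merge here */
        return 0;
}
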
index ac851e8376b1931d88adcf4ff5eaa8bd2445a635..faefa72a11ebaacff32569535e69a9005a0d4f0f 100644 (file)
@@ -153,6 +153,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
        struct mount *m = real_mount(mnt);
        struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
+       int cmp;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
 
@@ -178,11 +179,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
                        goto out;
                }
 
-               if (mark->group->priority < lmark->group->priority)
-                       continue;
-
-               if ((mark->group->priority == lmark->group->priority) &&
-                   (mark->group < lmark->group))
+               cmp = fsnotify_compare_groups(lmark->group, mark->group);
+               if (cmp < 0)
                        continue;
 
                hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
index 97de0fbd9f784196312d2330b34e43873b6c6ecc..a9604400406491744281dfaee496491e36f5a074 100644 (file)
@@ -925,7 +925,7 @@ static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
                              size_t veclen, size_t total)
 {
        int ret;
-       struct msghdr msg;
+       struct msghdr msg = {.msg_flags = 0,};
 
        if (sock == NULL) {
                ret = -EINVAL;
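
The fix relies on a C guarantee: a designated initializer such as { .msg_flags = 0, } zero-initializes every member not named, so no field of the on-stack msghdr reaches the send path uninitialized. Illustrated on a toy struct:

#include <stdio.h>

struct toy_msghdr {
        void *msg_name;
        int   msg_flags;
};

int main(void)
{
        /* Naming one member zero-initializes all the others, as with
         * static storage (C11 6.7.9), so nothing here is stack garbage. */
        struct toy_msghdr msg = { .msg_flags = 0, };

        printf("name=%p flags=%d\n", msg.msg_name, msg.msg_flags);
        return 0;
}
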
index e60125976873b23314035580041ab73ef2f147ac..34355818a2e03460759b144f8f9c284d5a810423 100644 (file)
@@ -1,4 +1,4 @@
-config OVERLAYFS_FS
+config OVERLAY_FS
        tristate "Overlay filesystem support"
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
index 8f91889480d0515011c3ebe64beef3660ea82b28..900daed3e91d28f1901c78cab4e9c2fdddf50043 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for the overlay filesystem.
 #
 
-obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
+obj-$(CONFIG_OVERLAY_FS) += overlay.o
 
-overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
+overlay-objs := super.o inode.o dir.o readdir.o copy_up.o
index 15cd91ad9940db5f130883120cf3b9606efe0705..8ffc4b980f1b68641c17a7bfb67979f205c658b5 100644 (file)
@@ -284,8 +284,7 @@ out:
        return ERR_PTR(err);
 }
 
-static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry,
-                                               enum ovl_path_type type)
+static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
 {
        int err;
        struct dentry *ret = NULL;
@@ -294,8 +293,17 @@ static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry,
        err = ovl_check_empty_dir(dentry, &list);
        if (err)
                ret = ERR_PTR(err);
-       else if (type == OVL_PATH_MERGE)
-               ret = ovl_clear_empty(dentry, &list);
+       else {
+               /*
+                * If no upperdentry then skip clearing whiteouts.
+                *
+                * Can race with copy-up, since we don't hold the upperdir
+                * mutex.  Doesn't matter, since copy-up can't create a
+                * non-empty directory from an empty one.
+                */
+               if (ovl_dentry_upper(dentry))
+                       ret = ovl_clear_empty(dentry, &list);
+       }
 
        ovl_cache_free(&list);
 
@@ -487,8 +495,7 @@ out:
        return err;
 }
 
-static int ovl_remove_and_whiteout(struct dentry *dentry,
-                                  enum ovl_path_type type, bool is_dir)
+static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *wdir = workdir->d_inode;
@@ -500,7 +507,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        int err;
 
        if (is_dir) {
-               opaquedir = ovl_check_empty_and_clear(dentry, type);
+               opaquedir = ovl_check_empty_and_clear(dentry);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir))
                        goto out;
@@ -515,9 +522,10 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        if (IS_ERR(whiteout))
                goto out_unlock;
 
-       if (type == OVL_PATH_LOWER) {
+       upper = ovl_dentry_upper(dentry);
+       if (!upper) {
                upper = lookup_one_len(dentry->d_name.name, upperdir,
-                                          dentry->d_name.len);
+                                      dentry->d_name.len);
                err = PTR_ERR(upper);
                if (IS_ERR(upper))
                        goto kill_whiteout;
@@ -529,7 +537,6 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        } else {
                int flags = 0;
 
-               upper = ovl_dentry_upper(dentry);
                if (opaquedir)
                        upper = opaquedir;
                err = -ESTALE;
@@ -648,7 +655,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                cap_raise(override_cred->cap_effective, CAP_CHOWN);
                old_cred = override_creds(override_cred);
 
-               err = ovl_remove_and_whiteout(dentry, type, is_dir);
+               err = ovl_remove_and_whiteout(dentry, is_dir);
 
                revert_creds(old_cred);
                put_cred(override_cred);
@@ -781,7 +788,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        }
 
        if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
-               opaquedir = ovl_check_empty_and_clear(new, new_type);
+               opaquedir = ovl_check_empty_and_clear(new);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir)) {
                        opaquedir = NULL;
index af2d18c9fcee19af345aa66c5bcf31407e316f21..07d74b24913bdee757377d103427883fb7d12e70 100644 (file)
@@ -235,26 +235,36 @@ out:
        return err;
 }
 
+static bool ovl_need_xattr_filter(struct dentry *dentry,
+                                 enum ovl_path_type type)
+{
+       return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+}
+
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
                     void *value, size_t size)
 {
-       if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
-           ovl_is_private_xattr(name))
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+
+       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                return -ENODATA;
 
-       return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
+       return vfs_getxattr(realpath.dentry, name, value, size);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
        ssize_t res;
        int off;
 
-       res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
+       res = vfs_listxattr(realpath.dentry, list, size);
        if (res <= 0 || size == 0)
                return res;
 
-       if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
+       if (!ovl_need_xattr_filter(dentry, type))
                return res;
 
        /* filter out private xattrs */
@@ -279,17 +289,16 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
 {
        int err;
        struct path realpath;
-       enum ovl_path_type type;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
 
        err = ovl_want_write(dentry);
        if (err)
                goto out;
 
-       if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
-           ovl_is_private_xattr(name))
+       err = -ENODATA;
+       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                goto out_drop_write;
 
-       type = ovl_path_real(dentry, &realpath);
        if (type == OVL_PATH_LOWER) {
                err = vfs_getxattr(realpath.dentry, name, NULL, 0);
                if (err < 0)
index 4e9d7c1fea52a98b85e8c3ce04c792b07ef4377a..ab1e3dcbed9523d05b3bf4d73a5b685532e0acaa 100644 (file)
@@ -168,7 +168,7 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
 {
        struct ovl_dir_cache *cache = od->cache;
 
-       list_del(&od->cursor.l_node);
+       list_del_init(&od->cursor.l_node);
        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
@@ -274,11 +274,11 @@ static int ovl_dir_mark_whiteouts(struct dentry *dir,
        return 0;
 }
 
-static inline int ovl_dir_read_merged(struct path *upperpath,
-                                     struct path *lowerpath,
-                                     struct list_head *list)
+static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
        int err;
+       struct path lowerpath;
+       struct path upperpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
@@ -286,25 +286,28 @@ static inline int ovl_dir_read_merged(struct path *upperpath,
                .is_merge = false,
        };
 
-       if (upperpath->dentry) {
-               err = ovl_dir_read(upperpath, &rdd);
+       ovl_path_lower(dentry, &lowerpath);
+       ovl_path_upper(dentry, &upperpath);
+
+       if (upperpath.dentry) {
+               err = ovl_dir_read(&upperpath, &rdd);
                if (err)
                        goto out;
 
-               if (lowerpath->dentry) {
-                       err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd);
+               if (lowerpath.dentry) {
+                       err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
                        if (err)
                                goto out;
                }
        }
-       if (lowerpath->dentry) {
+       if (lowerpath.dentry) {
                /*
                 * Insert lowerpath entries before upperpath ones; this allows
                 * offsets to be reasonably constant
                 */
                list_add(&rdd.middle, rdd.list);
                rdd.is_merge = true;
-               err = ovl_dir_read(lowerpath, &rdd);
+               err = ovl_dir_read(&lowerpath, &rdd);
                list_del(&rdd.middle);
        }
 out:
@@ -329,8 +332,6 @@ static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
 {
        int res;
-       struct path lowerpath;
-       struct path upperpath;
        struct ovl_dir_cache *cache;
 
        cache = ovl_dir_cache(dentry);
@@ -347,10 +348,7 @@ static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
        cache->refcount = 1;
        INIT_LIST_HEAD(&cache->entries);
 
-       ovl_path_lower(dentry, &lowerpath);
-       ovl_path_upper(dentry, &upperpath);
-
-       res = ovl_dir_read_merged(&upperpath, &lowerpath, &cache->entries);
+       res = ovl_dir_read_merged(dentry, &cache->entries);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
@@ -452,10 +450,10 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
-       if (!od->is_upper && ovl_path_type(dentry) == OVL_PATH_MERGE) {
+       if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
                struct inode *inode = file_inode(file);
 
-               realfile =lockless_dereference(od->upperfile);
+               realfile = lockless_dereference(od->upperfile);
                if (!realfile) {
                        struct path upperpath;
 
@@ -538,14 +536,9 @@ const struct file_operations ovl_dir_operations = {
 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
 {
        int err;
-       struct path lowerpath;
-       struct path upperpath;
        struct ovl_cache_entry *p;
 
-       ovl_path_upper(dentry, &upperpath);
-       ovl_path_lower(dentry, &lowerpath);
-
-       err = ovl_dir_read_merged(&upperpath, &lowerpath, list);
+       err = ovl_dir_read_merged(dentry, list);
        if (err)
                return err;
 
index 08b704cebfc4f8819944e09b05f5f7de35659b92..f16d318b71f8bbe4e77f8a3214e616101848e49c 100644 (file)
@@ -24,7 +24,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 MODULE_DESCRIPTION("Overlay filesystem");
 MODULE_LICENSE("GPL");
 
-#define OVERLAYFS_SUPER_MAGIC 0x794c764f
+#define OVERLAYFS_SUPER_MAGIC 0x794c7630
 
 struct ovl_config {
        char *lowerdir;
@@ -84,12 +84,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
 {
-       struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
-       /*
-        * Make sure to order reads to upperdentry wrt ovl_dentry_update()
-        */
-       smp_read_barrier_depends();
-       return upperdentry;
+       return lockless_dereference(oe->__upperdentry);
 }
 
 void ovl_path_upper(struct dentry *dentry, struct path *path)
@@ -462,11 +457,34 @@ static const match_table_t ovl_tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static char *ovl_next_opt(char **s)
+{
+       char *sbegin = *s;
+       char *p;
+
+       if (sbegin == NULL)
+               return NULL;
+
+       for (p = sbegin; *p; p++) {
+               if (*p == '\\') {
+                       p++;
+                       if (!*p)
+                               break;
+               } else if (*p == ',') {
+                       *p = '\0';
+                       *s = p + 1;
+                       return sbegin;
+               }
+       }
+       *s = NULL;
+       return sbegin;
+}
+
 static int ovl_parse_opt(char *opt, struct ovl_config *config)
 {
        char *p;
 
-       while ((p = strsep(&opt, ",")) != NULL) {
+       while ((p = ovl_next_opt(&opt)) != NULL) {
                int token;
                substring_t args[MAX_OPT_ARGS];
 
@@ -554,15 +572,34 @@ out_dput:
        goto out_unlock;
 }
 
+static void ovl_unescape(char *s)
+{
+       char *d = s;
+
+       for (;; s++, d++) {
+               if (*s == '\\')
+                       s++;
+               *d = *s;
+               if (!*s)
+                       break;
+       }
+}
+
 static int ovl_mount_dir(const char *name, struct path *path)
 {
        int err;
+       char *tmp = kstrdup(name, GFP_KERNEL);
+
+       if (!tmp)
+               return -ENOMEM;
 
-       err = kern_path(name, LOOKUP_FOLLOW, path);
+       ovl_unescape(tmp);
+       err = kern_path(tmp, LOOKUP_FOLLOW, path);
        if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
                err = -EINVAL;
        }
+       kfree(tmp);
        return err;
 }
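
Together, ovl_next_opt() and ovl_unescape() let a pathname contain a literal comma, something the old strsep() split could not express. The two functions from the hunks run unchanged in userspace; an illustrative option string:

#include <stdio.h>

static char *ovl_next_opt(char **s)
{
        char *sbegin = *s;
        char *p;

        if (sbegin == NULL)
                return NULL;

        for (p = sbegin; *p; p++) {
                if (*p == '\\') {
                        p++;
                        if (!*p)
                                break;
                } else if (*p == ',') {
                        *p = '\0';
                        *s = p + 1;
                        return sbegin;
                }
        }
        *s = NULL;
        return sbegin;
}

static void ovl_unescape(char *s)
{
        char *d = s;

        for (;; s++, d++) {
                if (*s == '\\')
                        s++;
                *d = *s;
                if (!*s)
                        break;
        }
}

int main(void)
{
        char opts[] = "lowerdir=/low\\,er,upperdir=/up";
        char *o = opts, *p;

        while ((p = ovl_next_opt(&o)) != NULL) {
                ovl_unescape(p);
                printf("option: %s\n", p);
        }
        /* prints: option: lowerdir=/low,er
         *         option: upperdir=/up    */
        return 0;
}
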
 
@@ -776,11 +813,11 @@ static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
 
 static struct file_system_type ovl_fs_type = {
        .owner          = THIS_MODULE,
-       .name           = "overlayfs",
+       .name           = "overlay",
        .mount          = ovl_mount,
        .kill_sb        = kill_anon_super,
 };
-MODULE_ALIAS_FS("overlayfs");
+MODULE_ALIAS_FS("overlay");
 
 static int __init ovl_init(void)
 {
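
With the filesystem type now registered as "overlay" rather than "overlayfs", mount requests must use the new name. An illustrative mount(2) call (the four paths are hypothetical and must exist beforehand; requires privilege):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

        /* fs type is now "overlay"; "overlayfs" no longer matches */
        if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
                perror("mount");
                return 1;
        }
        return 0;
}
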
index 92e8f99a58575b0667be026c9bfc6da5eeeae76d..281002689d64f858c605ce4a8d2aa72864034549 100644 (file)
@@ -1338,7 +1338,10 @@ xfs_free_file_space(
        goto out;
 }
 
-
+/*
+ * Preallocate and zero a range of a file. This mechanism has the allocation
+ * semantics of fallocate and in addition converts data in the range to zeroes.
+ */
 int
 xfs_zero_file_space(
        struct xfs_inode        *ip,
@@ -1346,65 +1349,30 @@ xfs_zero_file_space(
        xfs_off_t               len)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       uint                    granularity;
-       xfs_off_t               start_boundary;
-       xfs_off_t               end_boundary;
+       uint                    blksize;
        int                     error;
 
        trace_xfs_zero_file_space(ip);
 
-       granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+       blksize = 1 << mp->m_sb.sb_blocklog;
 
        /*
-        * Round the range of extents we are going to convert inwards.  If the
-        * offset is aligned, then it doesn't get changed so we zero from the
-        * start of the block offset points to.
+        * Punch a hole and prealloc the range. We use hole punch rather than
+        * unwritten extent conversion for two reasons:
+        *
+        * 1.) Hole punch handles partial block zeroing for us.
+        *
+        * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
+        * by virtue of the hole punch.
         */
-       start_boundary = round_up(offset, granularity);
-       end_boundary = round_down(offset + len, granularity);
-
-       ASSERT(start_boundary >= offset);
-       ASSERT(end_boundary <= offset + len);
-
-       if (start_boundary < end_boundary - 1) {
-               /*
-                * Writeback the range to ensure any inode size updates due to
-                * appending writes make it to disk (otherwise we could just
-                * punch out the delalloc blocks).
-                */
-               error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                               start_boundary, end_boundary - 1);
-               if (error)
-                       goto out;
-               truncate_pagecache_range(VFS_I(ip), start_boundary,
-                                        end_boundary - 1);
-
-               /* convert the blocks */
-               error = xfs_alloc_file_space(ip, start_boundary,
-                                       end_boundary - start_boundary - 1,
-                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
-               if (error)
-                       goto out;
-
-               /* We've handled the interior of the range, now for the edges */
-               if (start_boundary != offset) {
-                       error = xfs_iozero(ip, offset, start_boundary - offset);
-                       if (error)
-                               goto out;
-               }
-
-               if (end_boundary != offset + len)
-                       error = xfs_iozero(ip, end_boundary,
-                                          offset + len - end_boundary);
-
-       } else {
-               /*
-                * It's either a sub-granularity range or the range spanned lies
-                * partially across two adjacent blocks.
-                */
-               error = xfs_iozero(ip, offset, len);
-       }
+       error = xfs_free_file_space(ip, offset, len);
+       if (error)
+               goto out;
 
+       error = xfs_alloc_file_space(ip, round_down(offset, blksize),
+                                    round_up(offset + len, blksize) -
+                                    round_down(offset, blksize),
+                                    XFS_BMAPI_PREALLOC);
 out:
        return error;
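
The userspace analogue of the new strategy is a hole punch followed by a plain preallocation over the covering range: the punch leaves the bytes reading back as zeroes even if the prealloc later fails with ENOSPC. A hedged sketch using fallocate(2) (file name and sizes are illustrative; the kernel version additionally rounds the prealloc out to block boundaries):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        off_t off = 1000, len = 8192;
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* 1.) hole punch: handles the partially covered edge blocks and
         * guarantees the range reads back as zeroes */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      off, len))
                perror("punch");

        /* 2.) preallocation over the range; even if this hits ENOSPC
         * the bytes are already zero by virtue of the punch */
        if (fallocate(fd, 0, off, len))
                perror("prealloc");

        close(fd);
        return 0;
}
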
 
index f1deb961a296a300add3a6333eebd47619a2100c..894924a5129bea590e5fc3008d830889335e3fe1 100644 (file)
@@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk(
        XFS_WANT_CORRUPTED_RETURN(stat == 1);
 
        /* Check if the record contains the inode in request */
-       if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
-               return -EINVAL;
+       if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
+               *icount = 0;
+               return 0;
+       }
 
        idx = agino - irec->ir_startino + 1;
        if (idx < XFS_INODES_PER_CHUNK &&
@@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk(
 
 #define XFS_BULKSTAT_UBLEFT(ubleft)    ((ubleft) >= statstruct_size)
 
+struct xfs_bulkstat_agichunk {
+       char            __user **ac_ubuffer;/* pointer into user's buffer */
+       int             ac_ubleft;      /* bytes left in user's buffer */
+       int             ac_ubelem;      /* spaces used in user's buffer */
+};
+
 /*
  * Process inodes in chunk with a pointer to a formatter function
  * that will iget the inode and fill in the appropriate structure.
  */
-int
+static int
 xfs_bulkstat_ag_ichunk(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irbp,
        bulkstat_one_pf                 formatter,
        size_t                          statstruct_size,
-       struct xfs_bulkstat_agichunk    *acp)
+       struct xfs_bulkstat_agichunk    *acp,
+       xfs_agino_t                     *last_agino)
 {
-       xfs_ino_t                       lastino = acp->ac_lastino;
        char                            __user **ubufp = acp->ac_ubuffer;
-       int                             ubleft = acp->ac_ubleft;
-       int                             ubelem = acp->ac_ubelem;
-       int                             chunkidx, clustidx;
+       int                             chunkidx;
        int                             error = 0;
-       xfs_agino_t                     agino;
+       xfs_agino_t                     agino = irbp->ir_startino;
 
-       for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
-            XFS_BULKSTAT_UBLEFT(ubleft) &&
-            irbp->ir_freecount < XFS_INODES_PER_CHUNK;
-            chunkidx++, clustidx++, agino++) {
-               int             fmterror;       /* bulkstat formatter result */
+       for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
+            chunkidx++, agino++) {
+               int             fmterror;
                int             ubused;
-               xfs_ino_t       ino = XFS_AGINO_TO_INO(mp, agno, agino);
 
-               ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+               /* inode won't fit in buffer, we are done */
+               if (acp->ac_ubleft < statstruct_size)
+                       break;
 
                /* Skip if this inode is free */
-               if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
-                       lastino = ino;
+               if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
                        continue;
-               }
-
-               /*
-                * Count used inodes as free so we can tell when the
-                * chunk is used up.
-                */
-               irbp->ir_freecount++;
 
                /* Get the inode and fill in a single buffer */
                ubused = statstruct_size;
-               error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
-               if (fmterror == BULKSTAT_RV_NOTHING) {
-                       if (error && error != -ENOENT && error != -EINVAL) {
-                               ubleft = 0;
-                               break;
-                       }
-                       lastino = ino;
-                       continue;
-               }
-               if (fmterror == BULKSTAT_RV_GIVEUP) {
-                       ubleft = 0;
+               error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
+                                 *ubufp, acp->ac_ubleft, &ubused, &fmterror);
+
+               if (fmterror == BULKSTAT_RV_GIVEUP ||
+                   (error && error != -ENOENT && error != -EINVAL)) {
+                       acp->ac_ubleft = 0;
                        ASSERT(error);
                        break;
                }
-               if (*ubufp)
-                       *ubufp += ubused;
-               ubleft -= ubused;
-               ubelem++;
-               lastino = ino;
+
+               /* be careful not to leak error if at end of chunk */
+               if (fmterror == BULKSTAT_RV_NOTHING || error) {
+                       error = 0;
+                       continue;
+               }
+
+               *ubufp += ubused;
+               acp->ac_ubleft -= ubused;
+               acp->ac_ubelem++;
        }
 
-       acp->ac_lastino = lastino;
-       acp->ac_ubleft = ubleft;
-       acp->ac_ubelem = ubelem;
+       /*
+        * Post-update *last_agino. At this point, agino will always point one
+        * inode past the last inode we processed successfully. Hence we
+        * subtract that inode when setting the *last_agino cursor so that we
+        * return the correct cookie to userspace. On the next bulkstat call,
+        * the inode under the lastino cookie will be skipped as we have already
+        * processed it here.
+        */
+       *last_agino = agino - 1;
 
        return error;
 }
@@ -353,45 +356,33 @@ xfs_bulkstat(
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
-       int                     end_of_ag; /* set if we've seen the ag end */
-       int                     error;  /* error code */
-       int                     fmterror;/* bulkstat formatter result */
-       int                     i;      /* loop index */
-       int                     icount; /* count of inodes good in irbuf */
        size_t                  irbsize; /* size of irec buffer in bytes */
-       xfs_ino_t               ino;    /* inode number (filesystem) */
-       xfs_inobt_rec_incore_t  *irbp;  /* current irec buffer pointer */
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
-       xfs_inobt_rec_incore_t  *irbufend; /* end of good irec buffer entries */
-       xfs_ino_t               lastino; /* last inode number returned */
        int                     nirbuf; /* size of irbuf */
-       int                     rval;   /* return value error code */
-       int                     tmp;    /* result value from btree calls */
        int                     ubcount; /* size of user's buffer */
-       int                     ubleft; /* bytes left in user's buffer */
-       char                    __user *ubufp;  /* pointer into user's buffer */
-       int                     ubelem; /* spaces used in user's buffer */
+       struct xfs_bulkstat_agichunk ac;
+       int                     error = 0;
 
        /*
         * Get the last inode value, see if there's nothing to do.
         */
-       ino = (xfs_ino_t)*lastinop;
-       lastino = ino;
-       agno = XFS_INO_TO_AGNO(mp, ino);
-       agino = XFS_INO_TO_AGINO(mp, ino);
+       agno = XFS_INO_TO_AGNO(mp, *lastinop);
+       agino = XFS_INO_TO_AGINO(mp, *lastinop);
        if (agno >= mp->m_sb.sb_agcount ||
-           ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+           *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
                *done = 1;
                *ubcountp = 0;
                return 0;
        }
 
        ubcount = *ubcountp; /* statstruct's */
-       ubleft = ubcount * statstruct_size; /* bytes */
-       *ubcountp = ubelem = 0;
+       ac.ac_ubuffer = &ubuffer;
+       ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
+       ac.ac_ubelem = 0;
+
+       *ubcountp = 0;
        *done = 0;
-       fmterror = 0;
-       ubufp = ubuffer;
+
        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
        if (!irbuf)
                return -ENOMEM;
@@ -402,9 +393,13 @@ xfs_bulkstat(
         * Loop over the allocation groups, starting from the last
         * inode returned; 0 means start of the allocation group.
         */
-       rval = 0;
-       while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
-               cond_resched();
+       while (agno < mp->m_sb.sb_agcount) {
+               struct xfs_inobt_rec_incore     *irbp = irbuf;
+               struct xfs_inobt_rec_incore     *irbufend = irbuf + nirbuf;
+               bool                            end_of_ag = false;
+               int                             icount = 0;
+               int                             stat;
+
                error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                if (error)
                        break;
@@ -414,10 +409,6 @@ xfs_bulkstat(
                 */
                cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
                                            XFS_BTNUM_INO);
-               irbp = irbuf;
-               irbufend = irbuf + nirbuf;
-               end_of_ag = 0;
-               icount = 0;
                if (agino > 0) {
                        /*
                         * In the middle of an allocation group, we need to get
@@ -427,22 +418,23 @@ xfs_bulkstat(
 
                        error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
                        if (error)
-                               break;
+                               goto del_cursor;
                        if (icount) {
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
-                               agino = r.ir_startino + XFS_INODES_PER_CHUNK;
                        }
                        /* Increment to the next record */
-                       error = xfs_btree_increment(cur, 0, &tmp);
+                       error = xfs_btree_increment(cur, 0, &stat);
                } else {
                        /* Start of ag.  Lookup the first inode chunk */
-                       error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
+                       error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
+               }
+               if (error || stat == 0) {
+                       end_of_ag = true;
+                       goto del_cursor;
                }
-               if (error)
-                       break;
 
                /*
                 * Loop through inode btree records in this ag,
@@ -451,10 +443,10 @@ xfs_bulkstat(
                while (irbp < irbufend && icount < ubcount) {
                        struct xfs_inobt_rec_incore     r;
 
-                       error = xfs_inobt_get_rec(cur, &r, &i);
-                       if (error || i == 0) {
-                               end_of_ag = 1;
-                               break;
+                       error = xfs_inobt_get_rec(cur, &r, &stat);
+                       if (error || stat == 0) {
+                               end_of_ag = true;
+                               goto del_cursor;
                        }
 
                        /*
@@ -469,77 +461,79 @@ xfs_bulkstat(
                                irbp++;
                                icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
                        }
-                       /*
-                        * Set agino to after this chunk and bump the cursor.
-                        */
-                       agino = r.ir_startino + XFS_INODES_PER_CHUNK;
-                       error = xfs_btree_increment(cur, 0, &tmp);
+                       error = xfs_btree_increment(cur, 0, &stat);
+                       if (error || stat == 0) {
+                               end_of_ag = true;
+                               goto del_cursor;
+                       }
                        cond_resched();
                }
+
                /*
-                * Drop the btree buffers and the agi buffer.
-                * We can't hold any of the locks these represent
-                * when calling iget.
+                * Drop the btree buffers and the agi buffer as we can't hold any
+                * of the locks these represent when calling iget. If there is a
+                * pending error, then we are done.
                 */
+del_cursor:
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                xfs_buf_relse(agbp);
+               if (error)
+                       break;
                /*
-                * Now format all the good inodes into the user's buffer.
+                * Now format all the good inodes into the user's buffer. The
+                * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
+                * for the next loop iteration.
                 */
                irbufend = irbp;
                for (irbp = irbuf;
-                    irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
-                       struct xfs_bulkstat_agichunk ac;
-
-                       ac.ac_lastino = lastino;
-                       ac.ac_ubuffer = &ubuffer;
-                       ac.ac_ubleft = ubleft;
-                       ac.ac_ubelem = ubelem;
+                    irbp < irbufend && ac.ac_ubleft >= statstruct_size;
+                    irbp++) {
                        error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
-                                       formatter, statstruct_size, &ac);
+                                       formatter, statstruct_size, &ac,
+                                       &agino);
                        if (error)
-                               rval = error;
-
-                       lastino = ac.ac_lastino;
-                       ubleft = ac.ac_ubleft;
-                       ubelem = ac.ac_ubelem;
+                               break;
 
                        cond_resched();
                }
+
                /*
-                * Set up for the next loop iteration.
+                * If we've run out of space or had a formatting error, we
+                * are now done.
                 */
-               if (XFS_BULKSTAT_UBLEFT(ubleft)) {
-                       if (end_of_ag) {
-                               agno++;
-                               agino = 0;
-                       } else
-                               agino = XFS_INO_TO_AGINO(mp, lastino);
-               } else
+               if (ac.ac_ubleft < statstruct_size || error)
                        break;
+
+               if (end_of_ag) {
+                       agno++;
+                       agino = 0;
+               }
        }
        /*
         * Done, we're either out of filesystem or space to put the data.
         */
        kmem_free(irbuf);
-       *ubcountp = ubelem;
+       *ubcountp = ac.ac_ubelem;
+
        /*
-        * Found some inodes, return them now and return the error next time.
+        * We found some inodes, so clear the error status and return them.
+        * The lastino pointer will point directly at the inode that triggered
+        * any error that occurred, so on the next call the error will be
+        * triggered again and propagated to userspace as there will be no
+        * formatted inodes in the buffer.
         */
-       if (ubelem)
-               rval = 0;
-       if (agno >= mp->m_sb.sb_agcount) {
-               /*
-                * If we ran out of filesystem, mark lastino as off
-                * the end of the filesystem, so the next call
-                * will return immediately.
-                */
-               *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
+       if (ac.ac_ubelem)
+               error = 0;
+
+       /*
+        * If we ran out of filesystem, lastino will point off the end of
+        * the filesystem so the next call will return immediately.
+        */
+       *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
+       if (agno >= mp->m_sb.sb_agcount)
                *done = 1;
-       } else
-               *lastinop = (xfs_ino_t)lastino;
 
-       return rval;
+       return error;
 }
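
Editor's note: the reworked return protocol (resume cursor written to *lastinop, error swallowed whenever some inodes were formatted) is easiest to see from the caller's side. A minimal sketch, assuming the function's existing signature; the formatter and user buffer are placeholders from the ioctl layer:

/*
 * Hedged sketch (not part of this patch): driving xfs_bulkstat() with the
 * resumable cursor.  'formatter', 'ubuffer' and 'ubcount' are assumed to
 * come from the ioctl layer.
 */
static int example_bulkstat_walk(struct xfs_mount *mp,
                                 bulkstat_one_pf formatter,
                                 size_t statstruct_size,
                                 char __user *ubuffer, int ubcount)
{
        xfs_ino_t lastino = 0;  /* resume cursor, starts at the first inode */
        int done = 0;
        int error = 0;

        while (!done) {
                int count = ubcount;

                error = xfs_bulkstat(mp, &lastino, &count, formatter,
                                     statstruct_size, ubuffer, &done);
                if (error)
                        break;  /* lastino points at the failing inode */
                /* consume 'count' formatted records from ubuffer here */
        }
        return error;
}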
 
 int
index aaed08022eb9e9cd7e271d86d54e308e01427c77..6ea8b3912fa4fcb5b67ac9fd0d9c24164c12f6c1 100644 (file)
@@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount      *mp,
                               int              *ubused,
                               int              *stat);
 
-struct xfs_bulkstat_agichunk {
-       xfs_ino_t       ac_lastino;     /* last inode returned */
-       char            __user **ac_ubuffer;/* pointer into user's buffer */
-       int             ac_ubleft;      /* bytes left in user's buffer */
-       int             ac_ubelem;      /* spaces used in user's buffer */
-};
-
-int
-xfs_bulkstat_ag_ichunk(
-       struct xfs_mount                *mp,
-       xfs_agnumber_t                  agno,
-       struct xfs_inobt_rec_incore     *irbp,
-       bulkstat_one_pf                 formatter,
-       size_t                          statstruct_size,
-       struct xfs_bulkstat_agichunk    *acp);
-
 /*
  * Values for stat return value.
  */
index 53ed87698a74e0e4ae121e75607d74645456525a..8ba35c622e2202a889a3a06706bdc93e035fb841 100644 (file)
@@ -125,8 +125,8 @@ struct dma_buf_attachment;
 extern __printf(2, 3)
 void drm_ut_debug_printk(const char *function_name,
                         const char *format, ...);
-extern __printf(2, 3)
-void drm_err(const char *func, const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
 
 /***********************************************************************/
 /** \name DRM template customization defaults */
@@ -155,7 +155,7 @@ void drm_err(const char *func, const char *format, ...);
  * \param arg arguments
  */
 #define DRM_ERROR(fmt, ...)                            \
-       drm_err(__func__, fmt, ##__VA_ARGS__)
+       drm_err(fmt, ##__VA_ARGS__)
 
 /**
  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
@@ -170,7 +170,7 @@ void drm_err(const char *func, const char *format, ...);
                                      DEFAULT_RATELIMIT_BURST);         \
                                                                        \
        if (__ratelimit(&_rs))                                          \
-               drm_err(__func__, fmt, ##__VA_ARGS__);                  \
+               drm_err(fmt, ##__VA_ARGS__);                            \
 })
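
Editor's note: call sites need no changes beyond the dropped function-name argument; a minimal usage sketch (the failing call is hypothetical):

/* Hedged sketch: DRM_ERROR() now forwards only the format and arguments. */
ret = example_enable_crtc(crtc);        /* hypothetical driver call */
if (ret)
        DRM_ERROR("failed to enable CRTC: %d\n", ret);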
 
 #define DRM_INFO(fmt, ...)                             \
@@ -809,7 +809,7 @@ struct drm_device {
        struct drm_local_map *agp_buffer_map;
        unsigned int agp_buffer_token;
 
-        struct drm_mode_config mode_config;    /**< Current mode config */
+       struct drm_mode_config mode_config;     /**< Current mode config */
 
        /** \name GEM information */
        /*@{ */
@@ -986,7 +986,7 @@ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
 
 extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                            dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
 
@@ -1028,10 +1028,25 @@ void drm_pci_agp_destroy(struct drm_device *dev);
 
 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
 extern int drm_get_pci_dev(struct pci_dev *pdev,
                           const struct pci_device_id *ent,
                           struct drm_driver *driver);
 extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+                                 const struct pci_device_id *ent,
+                                 struct drm_driver *driver)
+{
+       return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+                                   struct drm_master *master)
+{
+       return -ENOSYS;
+}
+#endif
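
Editor's note: with the -ENOSYS stubs, code shared between PCI and non-PCI builds can call these helpers unconditionally. A hedged sketch; the probe function and driver object are hypothetical:

/* Hypothetical probe: links whether or not CONFIG_PCI is enabled, since
 * the stubs above turn the calls into plain -ENOSYS returns. */
static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &example_drm_driver);
}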
 
 #define DRM_PCIE_SPEED_25 1
 #define DRM_PCIE_SPEED_50 2
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644 (file)
index 0000000..ad22295
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+                         struct drm_crtc *crtc);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+                          struct drm_plane *plane);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+                              struct drm_connector *connector);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+                             struct drm_plane *plane, struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+                                struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+                                 struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+                                  struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+                              struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#endif /* DRM_ATOMIC_H_ */
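
Editor's note: a hedged sketch of the intended calling sequence for this API: allocate a state, look up the object states to be touched (which takes the needed locks), commit, and retry after drm_atomic_legacy_backoff() on -EDEADLK. Acquire-context setup is elided and all names are illustrative:

/* Hedged sketch: the expected alloc/lookup/commit/backoff cycle. */
static int example_set_plane_fb(struct drm_plane *plane,
                                struct drm_framebuffer *fb)
{
        struct drm_atomic_state *state;
        struct drm_plane_state *plane_state;
        int ret;

        state = drm_atomic_state_alloc(plane->dev);
        if (!state)
                return -ENOMEM;
retry:
        plane_state = drm_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state)) {
                ret = PTR_ERR(plane_state);
                goto fail;
        }
        drm_atomic_set_fb_for_plane(plane_state, fb);

        ret = drm_atomic_commit(state);
        if (!ret)
                return 0;       /* state assumed consumed on success */
fail:
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_atomic_legacy_backoff(state);
                goto retry;
        }
        drm_atomic_state_free(state);
        return ret;
}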
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644 (file)
index 0000000..f956b41
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check(struct drm_device *dev,
+                           struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+                            struct drm_atomic_state *state,
+                            bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+                                       struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+                                        struct drm_atomic_state *state);
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+                                         struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+                                    struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+                                    struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+                                     struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+                                 struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+                                  struct drm_crtc *crtc,
+                                  struct drm_framebuffer *fb,
+                                  int crtc_x, int crtc_y,
+                                  unsigned int crtc_w, unsigned int crtc_h,
+                                  uint32_t src_x, uint32_t src_y,
+                                  uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+                                       struct drm_property *property,
+                                       uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+                                       struct drm_property *property,
+                                       uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+                                       struct drm_property *property,
+                                       uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+                               struct drm_framebuffer *fb,
+                               struct drm_pending_vblank_event *event,
+                               uint32_t flags);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+                                         struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+                                         struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+                                         struct drm_connector_state *state);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc:  the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped.  To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_crtc_for_each_pending_plane()
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+       drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied.  Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+       drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
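
Editor's note: a hedged sketch of the iteration macro above in use, e.g. from a driver's atomic_flush hook after the state swap:

/* Hedged sketch: walk the planes currently attached to 'crtc'. */
static void example_atomic_flush(struct drm_crtc *crtc)
{
        struct drm_plane *plane;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                /* program plane->state into the hardware here */
        }
}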
index c40070a92d6b43f055c77f749ec44afc713e031a..dd2c16e4333312194151cf47b946507871d2027d 100644 (file)
@@ -42,6 +42,7 @@ struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
 struct device_node;
+struct fence;
 
 #define DRM_MODE_OBJECT_CRTC 0xcccccccc
 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -142,8 +143,8 @@ struct drm_framebuffer_funcs {
        int (*create_handle)(struct drm_framebuffer *fb,
                             struct drm_file *file_priv,
                             unsigned int *handle);
-       /**
-        * Optinal callback for the dirty fb ioctl.
+       /*
+        * Optional callback for the dirty fb ioctl.
         *
         * Userspace can notify the driver via this callback
         * that an area of the framebuffer has changed and should
@@ -196,7 +197,7 @@ struct drm_framebuffer {
 struct drm_property_blob {
        struct drm_mode_object base;
        struct list_head head;
-       unsigned int length;
+       size_t length;
        unsigned char data[];
 };
 
@@ -215,7 +216,7 @@ struct drm_property {
        uint64_t *values;
        struct drm_device *dev;
 
-       struct list_head enum_blob_list;
+       struct list_head enum_list;
 };
 
 struct drm_crtc;
@@ -224,19 +225,65 @@ struct drm_encoder;
 struct drm_pending_vblank_event;
 struct drm_plane;
 struct drm_bridge;
+struct drm_atomic_state;
+
+/**
+ * struct drm_crtc_state - mutable CRTC state
+ * @enable: whether the CRTC should be enabled, gates all other state
+ * @mode_changed: for use by helpers and drivers when computing state updates
+ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @last_vblank_count: for helpers and drivers to capture the vblank of the
+ *     update to ensure framebuffer cleanup isn't done too early
+ * @planes_changed: for use by helpers and drivers when computing state updates
+ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
+ * @mode: current mode timings
+ * @event: optional pointer to a DRM event to signal upon completion of the
+ *     state update
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_crtc_state {
+       bool enable;
+
+       /* computed state bits used by helpers and drivers */
+       bool planes_changed : 1;
+       bool mode_changed : 1;
+
+       /* attached planes bitmask:
+        * WARNING: transitional helpers do not maintain plane_mask so
+        * drivers not converted over to atomic helpers should not rely
+        * on plane_mask being accurate!
+        */
+       u32 plane_mask;
+
+       /* last_vblank_count: for vblank waits before cleanup */
+       u32 last_vblank_count;
+
+       /* adjusted_mode: for use by helpers and drivers */
+       struct drm_display_mode adjusted_mode;
+
+       struct drm_display_mode mode;
+
+       struct drm_pending_vblank_event *event;
+
+       struct drm_atomic_state *state;
+};
 
 /**
- * drm_crtc_funcs - control CRTCs for a given device
+ * struct drm_crtc_funcs - control CRTCs for a given device
  * @save: save CRTC state
  * @restore: restore CRTC state
  * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @cursor_set: setup the cursor
+ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
  * @cursor_move: move the cursor
  * @gamma_set: specify color ramp for CRTC
  * @destroy: deinit and free object
  * @set_property: called when a property is changed
  * @set_config: apply a new CRTC configuration
  * @page_flip: initiate a page flip
+ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
+ * @atomic_destroy_state: destroy an atomic state for this CRTC
+ * @atomic_set_property: set a property on an atomic state for this CRTC
  *
  * The drm_crtc_funcs structure is the central CRTC management structure
  * in the DRM.  Each CRTC controls one or more connectors (note that the name
@@ -287,16 +334,28 @@ struct drm_crtc_funcs {
 
        int (*set_property)(struct drm_crtc *crtc,
                            struct drm_property *property, uint64_t val);
+
+       /* atomic update handling */
+       struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
+       void (*atomic_destroy_state)(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *state);
+       int (*atomic_set_property)(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
 };
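
Editor's note: a hedged sketch of how a converted driver might fill these hooks, leaning on the default helpers declared in the new drm_atomic_helper.h:

/* Hedged sketch: atomic hooks backed by the stock helpers. */
static const struct drm_crtc_funcs example_crtc_funcs = {
        .reset                  = drm_atomic_helper_crtc_reset,
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .destroy                = drm_crtc_cleanup,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
};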
 
 /**
- * drm_crtc - central CRTC control structure
+ * struct drm_crtc - central CRTC control structure
  * @dev: parent DRM device
+ * @port: OF node used by drm_of_find_possible_crtcs()
  * @head: list management
  * @mutex: per-CRTC locking
  * @base: base KMS object for ID tracking etc.
  * @primary: primary plane for this CRTC
  * @cursor: cursor plane for this CRTC
+ * @cursor_x: current x position of the cursor, used for universal cursor planes
+ * @cursor_y: current y position of the cursor, used for universal cursor planes
  * @enabled: is this CRTC enabled?
  * @mode: current mode timings
  * @hwmode: mode timings as programmed to hw regs
@@ -309,10 +368,13 @@ struct drm_crtc_funcs {
  * @gamma_size: size of gamma ramp
  * @gamma_store: gamma ramp values
  * @framedur_ns: precise frame timing
- * @framedur_ns: precise line timing
+ * @linedur_ns: precise line timing
  * @pixeldur_ns: precise pixel timing
  * @helper_private: mid-layer private data
  * @properties: property tracking for this CRTC
+ * @state: current atomic state for this CRTC
+ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
+ *     legacy ioctls
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
@@ -322,7 +384,7 @@ struct drm_crtc {
        struct device_node *port;
        struct list_head head;
 
-       /**
+       /*
         * crtc mutex
         *
         * This provides a read lock for the overall crtc state (mode, dpms
@@ -368,6 +430,8 @@ struct drm_crtc {
 
        struct drm_object_properties properties;
 
+       struct drm_crtc_state *state;
+
        /*
         * For legacy crtc ioctls so that atomic drivers can get at the locking
         * acquire context.
@@ -375,9 +439,22 @@ struct drm_crtc {
        struct drm_modeset_acquire_ctx *acquire_ctx;
 };
 
+/**
+ * struct drm_connector_state - mutable connector state
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+       struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+       struct drm_encoder *best_encoder;
+
+       struct drm_atomic_state *state;
+};
 
 /**
- * drm_connector_funcs - control connectors on a given device
+ * struct drm_connector_funcs - control connectors on a given device
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
@@ -387,6 +464,9 @@ struct drm_crtc {
  * @set_property: property for this connector may need an update
  * @destroy: make object go away
  * @force: notify the driver that the connector is forced on
+ * @atomic_duplicate_state: duplicate the atomic state for this connector
+ * @atomic_destroy_state: destroy an atomic state for this connector
+ * @atomic_set_property: set a property on an atomic state for this connector
  *
  * Each CRTC may have one or more connectors attached to it.  The functions
  * below allow the core DRM code to control connectors, enumerate available modes,
@@ -411,10 +491,19 @@ struct drm_connector_funcs {
                             uint64_t val);
        void (*destroy)(struct drm_connector *connector);
        void (*force)(struct drm_connector *connector);
+
+       /* atomic update handling */
+       struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+       void (*atomic_destroy_state)(struct drm_connector *connector,
+                                    struct drm_connector_state *state);
+       int (*atomic_set_property)(struct drm_connector *connector,
+                                  struct drm_connector_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
 };
 
 /**
- * drm_encoder_funcs - encoder controls
+ * struct drm_encoder_funcs - encoder controls
  * @reset: reset state (e.g. at init or resume time)
  * @destroy: cleanup and free associated data
  *
@@ -428,7 +517,7 @@ struct drm_encoder_funcs {
 #define DRM_CONNECTOR_MAX_ENCODER 3
 
 /**
- * drm_encoder - central DRM encoder structure
+ * struct drm_encoder - central DRM encoder structure
  * @dev: parent DRM device
  * @head: list management
  * @base: base KMS object
@@ -472,7 +561,7 @@ struct drm_encoder {
 #define MAX_ELD_BYTES  128
 
 /**
- * drm_connector - central DRM connector control structure
+ * struct drm_connector - central DRM connector control structure
  * @dev: parent DRM device
  * @kdev: kernel device for sysfs attributes
  * @attr: sysfs attributes
@@ -483,6 +572,7 @@ struct drm_encoder {
  * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -490,10 +580,13 @@ struct drm_encoder {
  * @funcs: connector control functions
  * @edid_blob_ptr: DRM property containing EDID if present
  * @properties: property tracking for this connector
+ * @path_blob_ptr: DRM blob property data for the DP MST path property
  * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
  * @dpms: current dpms state
  * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
  * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
  * @encoder_ids: valid encoders for this connector
  * @encoder: encoder driving this connector, if any
  * @eld: EDID-like data, if present
@@ -503,6 +596,9 @@ struct drm_encoder {
  * @video_latency: video latency info from ELD, if found
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -563,14 +659,54 @@ struct drm_connector {
        unsigned bad_edid_counter;
 
        struct dentry *debugfs_entry;
+
+       struct drm_connector_state *state;
+};
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+       struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+       struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
+       struct fence *fence;
+
+       /* Signed dest location allows it to be partially off screen */
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w, crtc_h;
+
+       /* Source values are 16.16 fixed point */
+       uint32_t src_x, src_y;
+       uint32_t src_h, src_w;
+
+       struct drm_atomic_state *state;
 };
 
+
 /**
- * drm_plane_funcs - driver plane control functions
+ * struct drm_plane_funcs - driver plane control functions
  * @update_plane: update the plane configuration
  * @disable_plane: shut down the plane
  * @destroy: clean up plane resources
+ * @reset: reset plane after state has been invalidated (e.g. resume)
  * @set_property: called when a property is changed
+ * @atomic_duplicate_state: duplicate the atomic state for this plane
+ * @atomic_destroy_state: destroy an atomic state for this plane
+ * @atomic_set_property: set a property on an atomic state for this plane
  */
 struct drm_plane_funcs {
        int (*update_plane)(struct drm_plane *plane,
@@ -585,6 +721,15 @@ struct drm_plane_funcs {
 
        int (*set_property)(struct drm_plane *plane,
                            struct drm_property *property, uint64_t val);
+
+       /* atomic update handling */
+       struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+       void (*atomic_destroy_state)(struct drm_plane *plane,
+                                    struct drm_plane_state *state);
+       int (*atomic_set_property)(struct drm_plane *plane,
+                                  struct drm_plane_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
 };
 
 enum drm_plane_type {
@@ -594,7 +739,7 @@ enum drm_plane_type {
 };
 
 /**
- * drm_plane - central DRM plane control structure
+ * struct drm_plane - central DRM plane control structure
  * @dev: DRM device this plane belongs to
  * @head: for list management
  * @base: base mode object
@@ -603,14 +748,19 @@ enum drm_plane_type {
  * @format_count: number of formats supported
  * @crtc: currently bound CRTC
  * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ *     drm_mode_set_config_internal() to implement correct refcounting.
  * @funcs: helper functions
  * @properties: property tracking for this plane
  * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
  */
 struct drm_plane {
        struct drm_device *dev;
        struct list_head head;
 
+       struct drm_modeset_lock mutex;
+
        struct drm_mode_object base;
 
        uint32_t possible_crtcs;
@@ -620,8 +770,6 @@ struct drm_plane {
        struct drm_crtc *crtc;
        struct drm_framebuffer *fb;
 
-       /* Temporary tracking of the old fb while a modeset is ongoing. Used
-        * by drm_mode_set_config_internal to implement correct refcounting. */
        struct drm_framebuffer *old_fb;
 
        const struct drm_plane_funcs *funcs;
@@ -629,10 +777,14 @@ struct drm_plane {
        struct drm_object_properties properties;
 
        enum drm_plane_type type;
+
+       void *helper_private;
+
+       struct drm_plane_state *state;
 };
 
 /**
- * drm_bridge_funcs - drm_bridge control functions
+ * struct drm_bridge_funcs - drm_bridge control functions
  * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
  * @disable: Called right before encoder prepare, disables the bridge
  * @post_disable: Called right after encoder prepare, for lockstepped disable
@@ -656,7 +808,7 @@ struct drm_bridge_funcs {
 };
 
 /**
- * drm_bridge - central DRM bridge control structure
+ * struct drm_bridge - central DRM bridge control structure
  * @dev: DRM device this bridge belongs to
  * @head: list management
  * @base: base mode object
@@ -674,8 +826,35 @@ struct drm_bridge {
 };
 
 /**
- * drm_mode_set - new values for a CRTC config change
- * @head: list management
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @flags: state flags like async update
+ * @planes: pointer to array of plane pointers
+ * @plane_states: pointer to array of plane state pointers
+ * @crtcs: pointer to array of CRTC pointers
+ * @crtc_states: pointer to array of CRTC state pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of connector pointers
+ * @connector_states: pointer to array of connector state pointers
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+       struct drm_device *dev;
+       uint32_t flags;
+       struct drm_plane **planes;
+       struct drm_plane_state **plane_states;
+       struct drm_crtc **crtcs;
+       struct drm_crtc_state **crtc_states;
+       int num_connector;
+       struct drm_connector **connectors;
+       struct drm_connector_state **connector_states;
+
+       struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+
+/**
+ * struct drm_mode_set - new values for a CRTC config change
  * @fb: framebuffer to use for new config
  * @crtc: CRTC whose configuration we're about to change
  * @mode: mode timings to use
@@ -705,6 +884,9 @@ struct drm_mode_set {
  * struct drm_mode_config_funcs - basic driver provided mode setting functions
  * @fb_create: create a new framebuffer object
  * @output_poll_changed: function to handle output configuration changes
+ * @atomic_check: check whether a given atomic state update is possible
+ * @atomic_commit: commit an atomic state update previously verified with
+ *     atomic_check()
  *
  * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
  * involve drivers.
@@ -714,13 +896,20 @@ struct drm_mode_config_funcs {
                                             struct drm_file *file_priv,
                                             struct drm_mode_fb_cmd2 *mode_cmd);
        void (*output_poll_changed)(struct drm_device *dev);
+
+       int (*atomic_check)(struct drm_device *dev,
+                           struct drm_atomic_state *a);
+       int (*atomic_commit)(struct drm_device *dev,
+                            struct drm_atomic_state *a,
+                            bool async);
 };
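
Editor's note: a hedged sketch of wiring the new hooks to the atomic helpers declared earlier; example_fb_create stands in for whatever framebuffer creation the driver already has:

/* Hedged sketch: mode_config hooks for an atomic driver. */
static const struct drm_mode_config_funcs example_mode_config_funcs = {
        .fb_create     = example_fb_create,   /* hypothetical */
        .atomic_check  = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};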
 
 /**
- * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
  * @num_crtcs: CRTC count
  * @num_encoders: encoder count
  * @num_connectors: connector count
+ * @num_bridges: bridge count
  * @id_list: list of KMS object IDs in this group
  *
  * Currently this simply tracks the global mode setting state.  But in the
@@ -740,10 +929,14 @@ struct drm_mode_group {
 };
 
 /**
- * drm_mode_config - Mode configuration control structure
+ * struct drm_mode_config - Mode configuration control structure
  * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ *     legacy ioctls
  * @idr_mutex: mutex for KMS ID allocation and management
  * @crtc_idr: main KMS ID tracking object
+ * @fb_lock: mutex to protect fb state and lists
  * @num_fb: number of fbs available
  * @fb_list: list of framebuffers available
  * @num_connector: number of connectors on this device
@@ -752,17 +945,28 @@ struct drm_mode_group {
  * @bridge_list: list of bridge objects
  * @num_encoder: number of encoders on this device
  * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
  * @num_crtc: number of CRTCs on this device
  * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
  * @min_width: minimum pixel width on this device
  * @min_height: minimum pixel height on this device
  * @max_width: maximum pixel width on this device
  * @max_height: maximum pixel height on this device
  * @funcs: core driver provided mode setting functions
  * @fb_base: base address of the framebuffer
- * @poll_enabled: track polling status for this device
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
  * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
  * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @async_page_flip: does this device support async flips on the primary plane?
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
  *
  * Core mode resource tracking structure.  All CRTC, encoders, and connectors
  * enumerated by the driver are added here, as are global properties.  Some
@@ -776,14 +980,7 @@ struct drm_mode_config {
        struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
        /* this is limited to one for now */
 
-
-       /**
-        * fb_lock - mutex to protect fb state
-        *
-        * Besides the global fb list his also protects the fbs list in the
-        * file_priv
-        */
-       struct mutex fb_lock;
+       struct mutex fb_lock; /* protects global and per-file fb lists */
        int num_fb;
        struct list_head fb_list;
 
@@ -851,6 +1048,10 @@ struct drm_mode_config {
        struct drm_property *aspect_ratio_property;
        struct drm_property *dirty_info_property;
 
+       /* properties for virtual machine layout */
+       struct drm_property *suggested_x_property;
+       struct drm_property *suggested_y_property;
+
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
@@ -861,6 +1062,19 @@ struct drm_mode_config {
        uint32_t cursor_width, cursor_height;
 };
 
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+       list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+               if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+
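
Editor's note: a hedged usage sketch for the macro above; the debug message is illustrative:

/* Hedged sketch: visit each plane named in a CRTC state's plane_mask. */
static void example_log_attached_planes(struct drm_crtc *crtc)
{
        struct drm_plane *plane;

        drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
                DRM_DEBUG_KMS("plane %u attached\n", drm_plane_index(plane));
}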
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
 #define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -880,9 +1094,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
                                     struct drm_plane *primary,
                                     struct drm_plane *cursor,
                                     const struct drm_crtc_funcs *funcs);
-extern int drm_crtc_init(struct drm_device *dev,
-                        struct drm_crtc *crtc,
-                        const struct drm_crtc_funcs *funcs);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
 
@@ -978,9 +1189,9 @@ extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
 extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                               char *path);
+                                               const char *path);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                               struct edid *edid);
+                                                  const struct edid *edid);
 
 static inline bool drm_property_type_is(struct drm_property *property,
                uint32_t type)
@@ -1041,11 +1252,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
 extern int drm_property_add_enum(struct drm_property *property, int index,
                                 uint64_t value, const char *name);
 extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
-                                    char *formats[]);
+extern int drm_mode_create_tv_properties(struct drm_device *dev,
+                                        unsigned int num_modes,
+                                        char *modes[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
                                             struct drm_encoder *encoder);
index a3d75fefd01020b520be0d9b35958f9462b4aec4..7adbb65ea8aeaa92eb750bab5dad3a2c8fef28eb 100644 (file)
@@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
        int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
                        struct drm_display_mode *adjusted_mode, int x, int y,
                        struct drm_framebuffer *old_fb);
+       void (*mode_set_nofb)(struct drm_crtc *crtc);
 
        /* Move the crtc on the current fb to the given position *optional* */
        int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {
 
        /* disable crtc when not in use - more explicit than dpms off */
        void (*disable)(struct drm_crtc *crtc);
+
+       /* atomic helpers */
+       int (*atomic_check)(struct drm_crtc *crtc,
+                           struct drm_crtc_state *state);
+       void (*atomic_begin)(struct drm_crtc *crtc);
+       void (*atomic_flush)(struct drm_crtc *crtc);
 };
 
 /**
@@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
 
 extern void drm_helper_resume_force_mode(struct drm_device *dev);
 
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode, int x, int y,
+                            struct drm_framebuffer *old_fb);
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb);
+
 /* drm_probe_helper.c */
 extern int drm_helper_probe_single_connector_modes(struct drm_connector
                                                   *connector, uint32_t maxX,
index 9305c718d789b703c3a073f339c9c9b7e0ea352e..11f8c84f98ce51b1ca903d6ab02f4a65d50ff3b6 100644 (file)
 #define DP_TEST_CRC_B_CB                   0x244
 
 #define DP_TEST_SINK_MISC                  0x246
-#define DP_TEST_CRC_SUPPORTED              (1 << 5)
+# define DP_TEST_CRC_SUPPORTED             (1 << 5)
+# define DP_TEST_COUNT_MASK                0x7
 
 #define DP_TEST_RESPONSE                   0x260
 # define DP_TEST_ACK                       (1 << 0)
 #define DP_TEST_EDID_CHECKSUM              0x261
 
 #define DP_TEST_SINK                       0x270
-#define DP_TEST_SINK_START         (1 << 0)
+# define DP_TEST_SINK_START                (1 << 0)
 
 #define DP_PAYLOAD_TABLE_UPDATE_STATUS      0x2c0   /* 1.2 MST */
 # define DP_PAYLOAD_TABLE_UPDATED           (1 << 0)
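
Editor's note: a hedged sketch of consuming the new TEST_SINK_MISC bits, assuming the drm_dp_dpcd_readb() helper that accompanies drm_dp_dpcd_read() below:

/* Hedged sketch: does the sink advertise CRC test support? */
static bool example_sink_supports_crc(struct drm_dp_aux *aux)
{
        u8 misc;

        if (drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &misc) < 0)
                return false;
        return misc & DP_TEST_CRC_SUPPORTED;
}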
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
-/**
- * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
- *                              aux algorithm
- * @running: set by the algo indicating whether an i2c is ongoing or whether
- *          the i2c bus is quiescent
- * @address: i2c target address for the currently ongoing transfer
- * @aux_ch: driver callback to transfer a single byte of the i2c payload
- */
-struct i2c_algo_dp_aux_data {
-       bool running;
-       u16 address;
-       int (*aux_ch) (struct i2c_adapter *adapter,
-                      int mode, uint8_t write_byte,
-                      uint8_t *read_byte);
-};
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-
-
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
@@ -550,6 +531,7 @@ struct drm_dp_aux {
        struct mutex hw_mutex;
        ssize_t (*transfer)(struct drm_dp_aux *aux,
                            struct drm_dp_aux_msg *msg);
+       unsigned i2c_nack_count, i2c_defer_count;
 };
 
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
index 338fc1053835e5896a9046741c1599a54a3d24b2..cec6383bbdb8afbdd092ff0a869446070a37adfe 100644 (file)
@@ -28,7 +28,7 @@
 struct drm_dp_mst_branch;
 
 /**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifer
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
  * @vcpi: Virtual channel ID.
  * @pbn: Payload Bandwidth Number for this channel
  * @aligned_pbn: PBN aligned with slot size
@@ -371,7 +371,7 @@ struct drm_dp_sideband_msg_tx {
 struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
-       struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+       struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
        void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_connector *connector);
        void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
index b96031d947a0c5a56eb9cb57072fc7e5d655f901..d59240ffb1f7cc9f14b41a23b3cd9ba099a959c8 100644 (file)
@@ -207,6 +207,61 @@ struct detailed_timing {
 #define DRM_EDID_HDMI_DC_30               (1 << 4)
 #define DRM_EDID_HDMI_DC_Y444             (1 << 3)
 
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE      4
+
+#define DRM_ELD_VER                    0
+# define DRM_ELD_VER_SHIFT             3
+# define DRM_ELD_VER_MASK              (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN       2       /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL       4
+# define DRM_ELD_CEA_EDID_VER_SHIFT    5
+# define DRM_ELD_CEA_EDID_VER_MASK     (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE     (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861   (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A  (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD        (3 << 5)
+# define DRM_ELD_MNL_SHIFT             0
+# define DRM_ELD_MNL_MASK              (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE    5
+# define DRM_ELD_SAD_COUNT_SHIFT       4
+# define DRM_ELD_SAD_COUNT_MASK                (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT       2
+# define DRM_ELD_CONN_TYPE_MASK                (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI                (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP          (1 << 2)
+# define DRM_ELD_SUPPORTS_AI           (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP         (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY                6       /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX   0xfa    /* 500 ms */
+
+#define DRM_ELD_SPEAKER                        7
+# define DRM_ELD_SPEAKER_RLRC          (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC          (1 << 5)
+# define DRM_ELD_SPEAKER_RC            (1 << 4)
+# define DRM_ELD_SPEAKER_RLR           (1 << 3)
+# define DRM_ELD_SPEAKER_FC            (1 << 2)
+# define DRM_ELD_SPEAKER_LFE           (1 << 1)
+# define DRM_ELD_SPEAKER_FLR           (1 << 0)
+
+#define DRM_ELD_PORT_ID                        8       /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN           8
+
+#define DRM_ELD_MANUFACTURER_NAME0     16
+#define DRM_ELD_MANUFACTURER_NAME1     17
+
+#define DRM_ELD_PRODUCT_CODE0          18
+#define DRM_ELD_PRODUCT_CODE1          19
+
+#define DRM_ELD_MONITOR_NAME_STRING    20      /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad)      (20 + (mnl) + 3 * (sad))
+
 struct edid {
        u8 header[8];
        /* Vendor & product info */
@@ -279,4 +334,56 @@ int
 drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
                                            const struct drm_display_mode *mode);
 
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const uint8_t *eld)
+{
+       return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const uint8_t *eld)
+{
+       return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+               DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, e.g. for setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
+{
+       return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+               drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. That block is vendor
+ * specific and comprises the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const uint8_t *eld)
+{
+       return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
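
Editor's note: a hedged sketch tying the ELD helpers together; the debug print is illustrative:

/* Hedged sketch: derive sizes from a raw ELD and locate the monitor name. */
static void example_dump_eld(const uint8_t *eld)
{
        int mnl = drm_eld_mnl(eld);

        printk(KERN_DEBUG "ELD %d bytes, %d SADs, monitor '%.*s'\n",
               drm_eld_size(eld), drm_eld_sad_count(eld), mnl,
               (const char *)&eld[DRM_ELD_MONITOR_NAME_STRING]);
}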
+
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+       int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+                             size_t len),
+       void *data);
+
 #endif /* __DRM_EDID_H__ */
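
Editor's note: drm_do_get_edid() lets drivers supply their own block reader. A hedged sketch; the device type and its DDC read helper are hypothetical:

/* Hedged sketch: a driver-specific EDID block reader for drm_do_get_edid().
 * 'struct example_device' and example_ddc_read() are hypothetical. */
static int example_read_edid_block(void *data, u8 *buf, unsigned int block,
                                   size_t len)
{
        struct example_device *edev = data;

        return example_ddc_read(edev, block * EDID_LENGTH, buf, len);
}

static struct edid *example_get_edid(struct drm_connector *connector,
                                     struct example_device *edev)
{
        return drm_do_get_edid(connector, example_read_edid_block, edev);
}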
index 9eed34dcd6afea74a8438f5571552cd472191a45..d387cf06ae05676667cd91afc37f61d57ae6c749 100644 (file)
@@ -25,6 +25,7 @@
 #define DRM_FLIP_WORK_H
 
 #include <linux/kfifo.h>
+#include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
 /**
@@ -32,9 +33,9 @@
  *
  * Util to queue up work to run from work-queue context after flip/vblank.
 * Typically this can be used to defer unref of framebuffers, cursor
- * bo's, etc until after vblank.  The APIs are all safe (and lockless)
- * for up to one producer and once consumer at a time.  The single-consumer
- * aspect is ensured by committing the queued work to a single work-queue.
+ * BOs, etc. until after vblank.  The APIs are all thread-safe.
+ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
+ * in atomic context.
  */
 
 struct drm_flip_work;
@@ -50,27 +51,41 @@ struct drm_flip_work;
  */
 typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
 
+/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+       struct list_head node;
+       void *data;
+};
+
 /**
  * struct drm_flip_work - flip work queue
  * @name: debug name
- * @pending: number of queued but not committed items
- * @count: number of committed items
  * @func: callback fxn called for each committed item
  * @worker: worker which calls @func
- * @fifo: queue of committed items
+ * @queued: queued tasks
+ * @commited: committed tasks
+ * @lock: lock to access queued and commited lists
  */
 struct drm_flip_work {
        const char *name;
-       atomic_t pending, count;
        drm_flip_func_t func;
        struct work_struct worker;
-       DECLARE_KFIFO_PTR(fifo, void *);
+       struct list_head queued;
+       struct list_head commited;
+       spinlock_t lock;
 };
 
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+                             struct drm_flip_task *task);
 void drm_flip_work_queue(struct drm_flip_work *work, void *val);
 void drm_flip_work_commit(struct drm_flip_work *work,
                struct workqueue_struct *wq);
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
                const char *name, drm_flip_func_t func);
 void drm_flip_work_cleanup(struct drm_flip_work *work);
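
Editor's note: a hedged sketch of the reworked flow; the queue step may now run in atomic context, with the commit deferred to a workqueue:

/* Hedged sketch: defer framebuffer unref until after vblank. */
static void example_unref_fb(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_unreference(val);    /* assumed unref helper */
}

static void example_flip_flow(struct drm_flip_work *work,
                              struct drm_framebuffer *fb)
{
        /* once, at init: drm_flip_work_init(work, "fb", example_unref_fb); */
        drm_flip_work_queue(work, fb);          /* ok from atomic context */
        drm_flip_work_commit(work, system_wq);  /* after the vblank */
}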
 
index 1e6ae1458f7ab98ef42cd66ef9834cef8f276718..780511a459c01e3012efbfe2912ee27dc5996971 100644 (file)
@@ -119,6 +119,13 @@ struct drm_gem_object {
         * simply leave it as NULL.
         */
        struct dma_buf_attachment *import_attach;
+
+       /**
+        * dumb - created as dumb buffer
+        * Whether the gem object was created using the dumb buffer interface
+        * as such it may not be used for GPU rendering.
+        */
+       bool dumb;
 };
 
 void drm_gem_object_release(struct drm_gem_object *obj);
index 2ff35f3de9c5b9520587daca98f2c4d67b17bab4..acd6af8a8e675b5b58856651f18f788257bcc868 100644 (file)
@@ -4,6 +4,13 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+/**
+ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * @base: base GEM object
+ * @paddr: physical address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers
+ * @vaddr: kernel virtual address of the backing memory
+ */
 struct drm_gem_cma_object {
        struct drm_gem_object base;
        dma_addr_t paddr;
@@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
        return container_of(gem_obj, struct drm_gem_cma_object, base);
 }
 
-/* free gem object. */
+/* free GEM object */
 void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
 
-/* create memory region for drm framebuffer. */
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+                                    struct drm_device *drm,
+                                    struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
 int drm_gem_cma_dumb_create(struct drm_file *file_priv,
-               struct drm_device *drm, struct drm_mode_create_dumb *args);
+                           struct drm_device *drm,
+                           struct drm_mode_create_dumb *args);
 
-/* map memory region for drm framebuffer to user space. */
+/* map memory region for DRM framebuffer to user space */
 int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *drm, uint32_t handle, uint64_t *offset);
+                               struct drm_device *drm, u32 handle,
+                               u64 *offset);
 
-/* set vm_flags and we can change the vm attribute to other one at here. */
+/* set vm_flags; the VM attributes can also be adjusted here */
 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
 
-/* allocate physical memory. */
+/* allocate physical memory */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-               unsigned int size);
+                                             size_t size);
 
 extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 
index 8569dc5a1026a2a986ca5d15bd7e9575dd19723d..f1d8d0dbb4f162f9ea1c90dda44beedf14e4c175 100644 (file)
@@ -26,6 +26,7 @@ struct mipi_dsi_device;
  * struct mipi_dsi_msg - read/write DSI buffer
  * @channel: virtual channel id
  * @type: payload data type
+ * @flags: flags controlling this message transmission
  * @tx_len: length of @tx_buf
  * @tx_buf: data to be written
  * @rx_len: length of @rx_buf
@@ -43,12 +44,44 @@ struct mipi_dsi_msg {
        void *rx_buf;
 };
 
+bool mipi_dsi_packet_format_is_short(u8 type);
+bool mipi_dsi_packet_format_is_long(u8 type);
+
+/**
+ * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
+ * @size: size (in bytes) of the packet
+ * @header: the four bytes that make up the header (Data ID, Word Count or
+ *     Packet Data, and ECC)
+ * @payload_length: number of bytes in the payload
+ * @payload: a pointer to a buffer containing the payload, if any
+ */
+struct mipi_dsi_packet {
+       size_t size;
+       u8 header[4];
+       size_t payload_length;
+       const u8 *payload;
+};
+
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+                          const struct mipi_dsi_msg *msg);
+
 /**
  * struct mipi_dsi_host_ops - DSI bus operations
  * @attach: attach DSI device to DSI host
  * @detach: detach DSI device from DSI host
- * @transfer: send and/or receive DSI packet, return number of received bytes,
- *           or error
+ * @transfer: transmit a DSI packet
+ *
+ * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
+ * structures. This structure contains information about the type of packet
+ * being transmitted as well as the transmit and receive buffers. When an
+ * error is encountered during transmission, this function will return a
+ * negative error code. On success it shall return the number of bytes
+ * transmitted for write packets or the number of bytes received for read
+ * packets.
+ *
+ * Note that typically DSI packet transmission is atomic, so the .transfer()
+ * function will seldom return anything other than the number of bytes
+ * contained in the transmit buffer on success.
  */
 struct mipi_dsi_host_ops {
        int (*attach)(struct mipi_dsi_host *host,
@@ -56,7 +89,7 @@ struct mipi_dsi_host_ops {
        int (*detach)(struct mipi_dsi_host *host,
                      struct mipi_dsi_device *dsi);
        ssize_t (*transfer)(struct mipi_dsi_host *host,
-                           struct mipi_dsi_msg *msg);
+                           const struct mipi_dsi_msg *msg);
 };
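As a rough sketch of how a host driver might implement .transfer() on top of the new mipi_dsi_create_packet() helper (the example_write_* register helpers are hypothetical):

    static ssize_t example_dsi_transfer(struct mipi_dsi_host *host,
                                        const struct mipi_dsi_msg *msg)
    {
            struct mipi_dsi_packet packet;
            int ret;

            /* build the 4-byte header (Data ID, Word Count, ECC) */
            ret = mipi_dsi_create_packet(&packet, msg);
            if (ret < 0)
                    return ret;

            example_write_header(host, packet.header);
            if (mipi_dsi_packet_format_is_long(msg->type))
                    example_write_payload(host, packet.payload,
                                          packet.payload_length);

            /* per the kernel-doc above: bytes transmitted on success */
            return packet.size;
    }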
 
 /**
@@ -130,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
        return container_of(dev, struct mipi_dsi_device, dev);
 }
 
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
 int mipi_dsi_attach(struct mipi_dsi_device *dsi);
 int mipi_dsi_detach(struct mipi_dsi_device *dsi);
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
-                           size_t len);
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+                                           u16 value);
+
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+                              size_t size);
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+                             size_t num_params, void *data, size_t size);
+
+/**
+ * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
+ * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
+ *    information only
+ * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK: the TE output line consists of both
+ *    V-Blanking and H-Blanking information
+ */
+enum mipi_dsi_dcs_tear_mode {
+       MIPI_DSI_DCS_TEAR_MODE_VBLANK,
+       MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
+};
+
+#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
+#define MIPI_DSI_DCS_POWER_MODE_NORMAL  (1 << 3)
+#define MIPI_DSI_DCS_POWER_MODE_SLEEP   (1 << 4)
+#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
+#define MIPI_DSI_DCS_POWER_MODE_IDLE    (1 << 6)
+
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+                                 const void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+                          const void *data, size_t len);
 ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
                          size_t len);
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+                                   u16 end);
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+                                 u16 end);
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+                            enum mipi_dsi_dcs_tear_mode mode);
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
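Taken together, the DCS helpers above let a panel driver spell out its power-up sequence quite compactly; a hedged sketch (the 120 ms sleep-out delay is illustrative only, panel datasheets differ):

    static int example_panel_enable(struct mipi_dsi_device *dsi)
    {
            int ret;

            ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
            if (ret < 0)
                    return ret;
            msleep(120);

            ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
            if (ret < 0)
                    return ret;

            return mipi_dsi_dcs_set_display_on(dsi);
    }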
 
 /**
  * struct mipi_dsi_driver - DSI driver
@@ -167,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
        dev_set_drvdata(&dsi->dev, data);
 }
 
-int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
+                                 struct module *owner);
 void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
 
+#define mipi_dsi_driver_register(driver) \
+       mipi_dsi_driver_register_full(driver, THIS_MODULE)
+
 #define module_mipi_dsi_driver(__mipi_dsi_driver) \
        module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
                        mipi_dsi_driver_unregister)
index 75a5c45e21c72f2ca34da9f9ccb4d7e2908bd7e3..70595ff565baa45190faaae5c42630fe04a1beff 100644 (file)
@@ -33,6 +33,7 @@ struct drm_modeset_lock;
  * @ww_ctx: base acquire ctx
  * @contended: used internally for -EDEADLK handling
  * @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
  *
  * Each thread competing for a set of locks must use one acquire
  * ctx.  And if any lock fxn returns -EDEADLK, it must backoff and
@@ -126,11 +127,13 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock);
 
 struct drm_device;
 struct drm_crtc;
+struct drm_plane;
 
 void drm_modeset_lock_all(struct drm_device *dev);
 int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
 void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_modeset_lock_crtc(struct drm_crtc *crtc);
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+                          struct drm_plane *plane);
 void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
 void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
 struct drm_modeset_acquire_ctx *
index 52e6870534b24ad90bec35526d23a4dda690765d..a185392cafebdb792fa386ef632bf9b950142184 100644 (file)
@@ -25,6 +25,7 @@
 #define DRM_PLANE_HELPER_H
 
 #include <drm/drm_rect.h>
+#include <drm/drm_crtc.h>
 
 /*
  * Drivers that don't allow primary plane scaling may pass this macro in place
  * planes.
  */
 
+extern int drm_crtc_init(struct drm_device *dev,
+                        struct drm_crtc *crtc,
+                        const struct drm_crtc_funcs *funcs);
+
+/**
+ * drm_plane_helper_funcs - helper operations for planes
+ * @prepare_fb: prepare a framebuffer for use by the plane
+ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
+ * @atomic_check: check that a given atomic state is valid and can be applied
+ * @atomic_update: apply an atomic state to the plane
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_plane_helper_funcs {
+       int (*prepare_fb)(struct drm_plane *plane,
+                         struct drm_framebuffer *fb);
+       void (*cleanup_fb)(struct drm_plane *plane,
+                          struct drm_framebuffer *fb);
+
+       int (*atomic_check)(struct drm_plane *plane,
+                           struct drm_plane_state *state);
+       void (*atomic_update)(struct drm_plane *plane,
+                             struct drm_plane_state *old_state);
+};
+
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+                                       const struct drm_plane_helper_funcs *funcs)
+{
+       plane->helper_private = (void *)funcs;
+}
+
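A driver typically provides a static ops table and attaches it during plane setup; a minimal sketch (the example_plane_* callbacks are hypothetical):

    static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
            /* .prepare_fb/.cleanup_fb are optional pin/unpin hooks */
            .atomic_check  = example_plane_atomic_check,
            .atomic_update = example_plane_atomic_update,
    };

    /* during plane initialization */
    drm_plane_helper_add(plane, &example_plane_helper_funcs);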
 extern int drm_plane_helper_check_update(struct drm_plane *plane,
                                         struct drm_crtc *crtc,
                                         struct drm_framebuffer *fb,
@@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
                                                         int num_formats);
 
 
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+                           struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h);
+int drm_plane_helper_disable(struct drm_plane *plane);
+
+/* For use by drm_crtc_helper.c */
+int drm_plane_helper_commit(struct drm_plane *plane,
+                           struct drm_plane_state *plane_state,
+                           struct drm_framebuffer *old_fb);
 #endif
index a70d4564789862a823fd074bbdeef0b175148d11..180ad0e6de21dd3f0332335f90c122692a553838 100644 (file)
        INTEL_VGA_DEVICE(0x22b2, info), \
        INTEL_VGA_DEVICE(0x22b3, info)
 
+#define INTEL_SKL_IDS(info) \
+       INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+       INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+       INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+       INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+       INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+       INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+       INTEL_VGA_DEVICE(0x1912, info), /* DT  GT2 */ \
+       INTEL_VGA_DEVICE(0x1902, info), /* DT  GT1 */ \
+       INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+       INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+       INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+       INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+       INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+       INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
+       INTEL_VGA_DEVICE(0x191D, info)  /* WKS GT2 */
+
 #endif /* _I915_PCIIDS_H */
index 460441714413c620b8313ca9390778e089257651..b620c317c7720a27127ca3fd3c54e67e374652ce 100644 (file)
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  * @intr:    should the wait be interruptible
+ * @dups:    [out] optional list of duplicates.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * calling process receives a signal while waiting. In that case, no
  * buffers on the list will be reserved upon return.
  *
+ * If @dups is non-NULL, all buffers already reserved by the current thread
+ * (i.e. duplicates) are added to this list; otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
  * Buffers reserved by this function should be unreserved by
  * a call to either ttm_eu_backoff_reservation() or
  * ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  */
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                                 struct list_head *list, bool intr);
+                                 struct list_head *list, bool intr,
+                                 struct list_head *dups);
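A hedged sketch of the new @dups handling (list setup and the actual validation and submission are assumed to happen elsewhere; entries landing on &dups still belong to this thread's earlier reservation):

    static int example_submit(struct list_head *validate_list,
                              struct fence *fence)
    {
            struct ww_acquire_ctx ticket;
            struct list_head dups;
            int ret;

            INIT_LIST_HEAD(&dups);

            /* buffers this thread already reserved land on &dups
             * instead of failing the whole call with -EALREADY */
            ret = ttm_eu_reserve_buffers(&ticket, validate_list, true, &dups);
            if (ret)
                    return ret;

            /* ... validate buffers and submit the command stream ... */

            ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
            return 0;
    }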
 
 /**
  * function ttm_eu_fence_buffer_objects.
index a929f86d0dddd52816d6fdfc41905b8fab76da88..d72b5b35f15edd965b89de2c4759bd9bc964f14f 100644 (file)
@@ -60,7 +60,7 @@
 #define ESC1_CLK_SRC                   43
 #define HDMI_CLK_SRC                   44
 #define VSYNC_CLK_SRC                  45
-#define RBCPR_CLK_SRC                  46
+#define MMSS_RBCPR_CLK_SRC             46
 #define RBBMTIMER_CLK_SRC              47
 #define MAPLE_CLK_SRC                  48
 #define VDP_CLK_SRC                    49
index d6b56b21539b7e4e73bb80d927fc906d4d811e5c..801c0ac50c47f3ba80df69b9db99fda95d0db5e2 100644 (file)
 #define VF610_CLK_FASK_CLK_SEL         8
 #define VF610_CLK_AUDIO_EXT            9
 #define VF610_CLK_ENET_EXT             10
-#define VF610_CLK_PLL1_MAIN            11
+#define VF610_CLK_PLL1_SYS             11
 #define VF610_CLK_PLL1_PFD1            12
 #define VF610_CLK_PLL1_PFD2            13
 #define VF610_CLK_PLL1_PFD3            14
 #define VF610_CLK_PLL1_PFD4            15
-#define VF610_CLK_PLL2_MAIN            16
+#define VF610_CLK_PLL2_BUS             16
 #define VF610_CLK_PLL2_PFD1            17
 #define VF610_CLK_PLL2_PFD2            18
 #define VF610_CLK_PLL2_PFD3            19
 #define VF610_CLK_PLL2_PFD4            20
-#define VF610_CLK_PLL3_MAIN            21
+#define VF610_CLK_PLL3_USB_OTG         21
 #define VF610_CLK_PLL3_PFD1            22
 #define VF610_CLK_PLL3_PFD2            23
 #define VF610_CLK_PLL3_PFD3            24
 #define VF610_CLK_PLL3_PFD4            25
-#define VF610_CLK_PLL4_MAIN            26
-#define VF610_CLK_PLL5_MAIN            27
-#define VF610_CLK_PLL6_MAIN            28
+#define VF610_CLK_PLL4_AUDIO           26
+#define VF610_CLK_PLL5_ENET            27
+#define VF610_CLK_PLL6_VIDEO           28
 #define VF610_CLK_PLL3_MAIN_DIV                29
 #define VF610_CLK_PLL4_MAIN_DIV                30
 #define VF610_CLK_PLL6_MAIN_DIV                31
 #define VF610_CLK_DMAMUX3              153
 #define VF610_CLK_FLEXCAN0_EN          154
 #define VF610_CLK_FLEXCAN1_EN          155
-#define VF610_CLK_PLL7_MAIN            156
+#define VF610_CLK_PLL7_USB_HOST                156
 #define VF610_CLK_USBPHY0              157
 #define VF610_CLK_USBPHY1              158
-#define VF610_CLK_END                  159
+#define VF610_CLK_LVDS1_IN             159
+#define VF610_CLK_ANACLK1              160
+#define VF610_CLK_PLL1_BYPASS_SRC      161
+#define VF610_CLK_PLL2_BYPASS_SRC      162
+#define VF610_CLK_PLL3_BYPASS_SRC      163
+#define VF610_CLK_PLL4_BYPASS_SRC      164
+#define VF610_CLK_PLL5_BYPASS_SRC      165
+#define VF610_CLK_PLL6_BYPASS_SRC      166
+#define VF610_CLK_PLL7_BYPASS_SRC      167
+#define VF610_CLK_PLL1                 168
+#define VF610_CLK_PLL2                 169
+#define VF610_CLK_PLL3                 170
+#define VF610_CLK_PLL4                 171
+#define VF610_CLK_PLL5                 172
+#define VF610_CLK_PLL6                 173
+#define VF610_CLK_PLL7                 174
+#define VF610_PLL1_BYPASS              175
+#define VF610_PLL2_BYPASS              176
+#define VF610_PLL3_BYPASS              177
+#define VF610_PLL4_BYPASS              178
+#define VF610_PLL5_BYPASS              179
+#define VF610_PLL6_BYPASS              180
+#define VF610_PLL7_BYPASS              181
+#define VF610_CLK_END                  182
 
 #endif /* __DT_BINDINGS_CLOCK_VF610_H */
index 3d33794e4f3edd75950e58cb7d5c49f69e33795b..7448edff4723101b44877090ce9463bdb98d6d9c 100644 (file)
@@ -40,8 +40,8 @@
 
 /* Active pin states */
 #define PIN_OUTPUT             (0 | PULL_DIS)
-#define PIN_OUTPUT_PULLUP      (PIN_OUTPUT | PULL_ENA | PULL_UP)
-#define PIN_OUTPUT_PULLDOWN    (PIN_OUTPUT | PULL_ENA)
+#define PIN_OUTPUT_PULLUP      (PULL_UP)
+#define PIN_OUTPUT_PULLDOWN    (0)
 #define PIN_INPUT              (INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW         (INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP       (PULL_ENA | INPUT_EN | PULL_UP)
index be5fd38bd5a05d83eaac250055defcccacafe054..5d858e02997f5248d8a50150d870248e8146f4e6 100644 (file)
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#define GENMASK(h, l)          (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#define GENMASK_ULL(h, l)      (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK(h, l) \
+       (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+       (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
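The rewrite also makes the degenerate full-width case well-defined (the old form shifted a 32/64-bit constant by its full width). A few illustrative, build-time-checked values:

    static void genmask_examples(void)
    {
            BUILD_BUG_ON(GENMASK(7, 4) != 0xf0);
            BUILD_BUG_ON(GENMASK(31, 0) != ~0U);    /* previously undefined */
            BUILD_BUG_ON(GENMASK_ULL(39, 21) != 0x000000ffffe00000ULL);
    }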
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
index 4e2bd4c95b66ff245fa78d64a455267ac646fcff..0995c2de8162c2f6368647503ddda9017a68f6c6 100644 (file)
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
 extern unsigned long free_all_bootmem(void);
+extern void reset_node_managed_pages(pg_data_t *pgdat);
 extern void reset_all_zones_managed_pages(void);
 
 extern void free_bootmem_node(pg_data_t *pgdat,
index 6992afc6ba7f96fda9dba202f771fe7e28f807fe..b37ea95bc348f15b16a9c0d095588379b12edbee 100644 (file)
@@ -99,6 +99,12 @@ inval_skb:
        return 1;
 }
 
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+       /* the CAN-specific type of skb is identified by its data length */
+       return skb->len == CANFD_MTU;
+}
+
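A short receive-path sketch of the helper (purely illustrative):

    static void example_can_rx(struct sk_buff *skb)
    {
            if (can_is_canfd_skb(skb)) {
                    struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

                    /* CAN FD frame: up to 64 data bytes */
                    pr_debug("canfd: len=%u\n", cfd->len);
            } else {
                    struct can_frame *cf = (struct can_frame *)skb->data;

                    /* classic CAN frame: up to 8 data bytes */
                    pr_debug("can: dlc=%u\n", cf->can_dlc);
            }
    }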
 /* get data length from can_dlc with sanitized can_dlc */
 u8 can_dlc2len(u8 can_dlc);
 
index be21af149f119394c68bd8018a8de39f2db067ee..2839c639f0920942d1e835dd464598432e22e974 100644 (file)
@@ -352,7 +352,6 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
index 0430ed05d3b9f0511975fbef5ca89ec14faf3e92..a93438beb33cf12529c44a8cb905e35df125edae 100644 (file)
@@ -18,12 +18,12 @@ struct cma;
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t size,
-                       phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+                       phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t size,
-                                       phys_addr_t base, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base,
+                                       phys_addr_t size, int order_per_bit,
                                        struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
index 11c0182a153b8a9025a4b4eab4486fe99a07949f..cbb5790a35cd7f360ac602ddca36ab331319a649 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef __LINUX_HDMI_H_
index 8bbd7bc1043d9c4d26ffff38b35b8540093fdb71..03fa332ad2a8cec4e26c212b9333e56f0c6d6169 100644 (file)
@@ -72,7 +72,7 @@ struct iio_event_data {
 
 #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
 
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
 
 #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
 
index 0068708161ffa6954f320d7de7d4ebb86dd2a889..0a21fbefdfbec1a693ed09c3d256b8059760655e 100644 (file)
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
 static __inline__ __be32 inet_make_mask(int logmask)
 {
        if (logmask)
-               return htonl(~((1<<(32-logmask))-1));
+               return htonl(~((1U<<(32-logmask))-1));
        return 0;
 }
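The unsigned literal avoids signed overflow for a /1 prefix (1 << 31 overflows a signed int). Two illustrative values:

    static void example_masks(void)
    {
            __be32 m24 = inet_make_mask(24); /* htonl(0xffffff00) */
            __be32 m1 = inet_make_mask(1);   /* htonl(0x80000000), now well-defined */

            (void)m24;
            (void)m1;
    }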
 
index 8422b4ed6882be8e8ece65f9a1bcd2ceb5af2bfb..b9376cd5a187e818c28c09935eb022a1920848a8 100644 (file)
@@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
        return kstat_cpu(cpu).irqs_sum;
 }
 
-/*
- * Lock/unlock the current runqueue - to extract task statistics:
- */
-extern unsigned long long task_delta_exec(struct task_struct *);
-
 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
 extern void account_steal_time(cputime_t);
index ea53b04993f22745028d402e0238a30c91595afb..a6059bdf7b03baa4955c069637f686b3d709d819 100644 (file)
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
        struct hlist_node link;
index fc17d56581b2954c877eebcc637adca43a45321f..582e67f340543490ad917dbaf5fd01be25fafa14 100644 (file)
@@ -330,6 +330,13 @@ enum max77693_irq_source {
        MAX77693_IRQ_GROUP_NR,
 };
 
+#define SRC_IRQ_CHARGER                        BIT(0)
+#define SRC_IRQ_TOP                    BIT(1)
+#define SRC_IRQ_FLASH                  BIT(2)
+#define SRC_IRQ_MUIC                   BIT(3)
+#define SRC_IRQ_ALL                    (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+                                               | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
 #define LED_IRQ_FLED2_OPEN             BIT(0)
 #define LED_IRQ_FLED2_SHORT            BIT(1)
 #define LED_IRQ_FLED1_OPEN             BIT(2)
index 88787bb4b3b93fe08d322c2a54a183486427a2af..94d19f64cecf000e55b21b9a8662d15b91658c11 100644 (file)
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_sem and/or the
-        * locks protecting the reverse maps are held. The subsystem
-        * must guarantee that no additional references are taken to
-        * the pages in the range established between the call to
-        * invalidate_range_start() and the matching call to
-        * invalidate_range_end().
+        * locks protecting the reverse maps are held. If the subsystem
+        * can't guarantee that no additional references are taken to
+        * the pages in the range, it has to implement the
+        * invalidate_range() notifier to remove any references taken
+        * after invalidate_range_start().
         *
         * Invalidation of multiple concurrent ranges may be
         * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
        void (*invalidate_range_end)(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end);
+
+       /*
+        * invalidate_range() is either called between
+        * invalidate_range_start() and invalidate_range_end() when the
+        * VM has to free pages that were unmapped, but before the
+        * pages are actually freed, or outside of _start()/_end() when
+        * a (remote) TLB flush is necessary.
+        *
+        * If invalidate_range() is used to manage a non-CPU TLB with
+        * shared page-tables, it is not necessary to implement the
+        * invalidate_range_start()/end() notifiers, as
+        * invalidate_range() already catches the points in time when an
+        * external TLB range needs to be flushed.
+        *
+        * The invalidate_range() function is called under the ptl
+        * spin-lock and is not allowed to sleep.
+        *
+        * Note that this function might be called with just a sub-range
+        * of what was passed to invalidate_range_start()/end(), if
+        * called between those functions.
+        */
+       void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+                                unsigned long start, unsigned long end);
 };
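For hardware that shares the CPU page tables (an IOMMU with SVM-style access, say), the comment above implies the driver can implement invalidate_range() alone; a hedged sketch with a hypothetical device-TLB flush helper:

    static void example_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end)
    {
            /* runs under the ptl spinlock: must not sleep */
            example_flush_device_tlb(mn, start, end);
    }

    static const struct mmu_notifier_ops example_mn_ops = {
            /* _start()/_end() left NULL: invalidate_range() already
             * covers every point where the external TLB needs flushing */
            .invalidate_range = example_invalidate_range,
    };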
 
 /*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                __mmu_notifier_invalidate_range_end(mm, start, end);
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_invalidate_range(mm, start, end);
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
        mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        __young;                                                        \
 })
 
+#define        ptep_clear_flush_notify(__vma, __address, __ptep)               \
+({                                                                     \
+       unsigned long ___addr = __address & PAGE_MASK;                  \
+       struct mm_struct *___mm = (__vma)->vm_mm;                       \
+       pte_t ___pte;                                                   \
+                                                                       \
+       ___pte = ptep_clear_flush(__vma, __address, __ptep);            \
+       mmu_notifier_invalidate_range(___mm, ___addr,                   \
+                                       ___addr + PAGE_SIZE);           \
+                                                                       \
+       ___pte;                                                         \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)                 \
+({                                                                     \
+       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
+       struct mm_struct *___mm = (__vma)->vm_mm;                       \
+       pmd_t ___pmd;                                                   \
+                                                                       \
+       ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);               \
+       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
+                                     ___haddr + HPAGE_PMD_SIZE);       \
+                                                                       \
+       ___pmd;                                                         \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)                        \
+({                                                                     \
+       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
+       pmd_t ___pmd;                                                   \
+                                                                       \
+       ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);              \
+       mmu_notifier_invalidate_range(__mm, ___haddr,                   \
+                                     ___haddr + HPAGE_PMD_SIZE);       \
+                                                                       \
+       ___pmd;                                                         \
+})
+
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define        ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
index 48bf12ef6620ccc863c27afc615a2e2e460a6c99..ffe66e381c04237fb54a0447741f39a40d7c5d71 100644 (file)
@@ -431,6 +431,15 @@ struct zone {
         */
        int                     nr_migrate_reserve_block;
 
+#ifdef CONFIG_MEMORY_ISOLATION
+       /*
+        * Number of isolated pageblocks. It is used to solve an incorrect
+        * freepage counting problem due to racy retrieval of a pageblock's
+        * migratetype. Protected by zone->lock.
+        */
+       unsigned long           nr_isolate_pageblock;
+#endif
+
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
index 983876f24aed06ce26c7e33d8c83d77fa4736381..47ebb4fafd87f7ba0f37b36b22f54952f6f823c8 100644 (file)
@@ -1224,11 +1224,22 @@ struct nfs41_free_stateid_res {
        unsigned int                    status;
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+       kfree(cinfo->buckets);
+}
+
 #else
 
 struct pnfs_ds_commit_info {
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #ifdef CONFIG_NFS_V4_2
index 6545e7aec7bb95dc41ad6225050cb19e7038287a..29f0adc5f3e47e7c97c4e3aed816437ea9a4fa17 100644 (file)
@@ -267,14 +267,12 @@ extern int of_property_read_u64(const struct device_node *np,
 extern int of_property_read_string(struct device_node *np,
                                   const char *propname,
                                   const char **out_string);
-extern int of_property_read_string_index(struct device_node *np,
-                                        const char *propname,
-                                        int index, const char **output);
 extern int of_property_match_string(struct device_node *np,
                                    const char *propname,
                                    const char *string);
-extern int of_property_count_strings(struct device_node *np,
-                                    const char *propname);
+extern int of_property_read_string_helper(struct device_node *np,
+                                             const char *propname,
+                                             const char **out_strs, size_t sz, int index);
 extern int of_device_is_compatible(const struct device_node *device,
                                   const char *);
 extern int of_device_is_available(const struct device_node *device);
@@ -486,15 +484,9 @@ static inline int of_property_read_string(struct device_node *np,
        return -ENOSYS;
 }
 
-static inline int of_property_read_string_index(struct device_node *np,
-                                               const char *propname, int index,
-                                               const char **out_string)
-{
-       return -ENOSYS;
-}
-
-static inline int of_property_count_strings(struct device_node *np,
-                                           const char *propname)
+static inline int of_property_read_string_helper(struct device_node *np,
+                                                const char *propname,
+                                                const char **out_strs, size_t sz, int index)
 {
        return -ENOSYS;
 }
@@ -667,6 +659,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
        return of_property_count_elems_of_size(np, propname, sizeof(u64));
 }
 
+/**
+ * of_property_read_string_array() - Read an array of strings from a multiple
+ * strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @out_strs:  output array of string pointers.
+ * @sz:                number of array elements to read.
+ *
+ * Search for a property in a device tree node and retrieve a list of
+ * null-terminated string values (pointer to data, not a copy) in that property.
+ *
+ * If @out_strs is NULL, the number of strings in the property is returned.
+ */
+static inline int of_property_read_string_array(struct device_node *np,
+                                               const char *propname, const char **out_strs,
+                                               size_t sz)
+{
+       return of_property_read_string_helper(np, propname, out_strs, sz, 0);
+}
+
+/**
+ * of_property_count_strings() - Find and return the number of strings from a
+ * multiple strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of
+ * null-terminated strings contained in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ */
+static inline int of_property_count_strings(struct device_node *np,
+                                           const char *propname)
+{
+       return of_property_read_string_helper(np, propname, NULL, 0, 0);
+}
+
+/**
+ * of_property_read_string_index() - Find and read a string from a multiple
+ * strings property.
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @index:     index of the string in the list of strings
+ * @out_string:        pointer to null terminated return string, modified only if
+ *             return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+static inline int of_property_read_string_index(struct device_node *np,
+                                               const char *propname,
+                                               int index, const char **output)
+{
+       int rc = of_property_read_string_helper(np, propname, output, 1, index);
+       return rc < 0 ? rc : 0;
+}
+
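A short sketch tying the three front-ends together, assuming a hypothetical node with a "clock-names" string-list property:

    static int example_clock_names(struct device_node *np)
    {
            const char *names[4];
            const char *first;
            int count;

            count = of_property_count_strings(np, "clock-names");
            if (count < 0)
                    return count;

            /* pointers into the property data, not copies */
            count = of_property_read_string_array(np, "clock-names",
                                                  names, ARRAY_SIZE(names));
            if (count < 0)
                    return count;

            if (!of_property_read_string_index(np, "clock-names", 0, &first))
                    pr_debug("first clock: %s\n", first);

            return 0;
    }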
 /**
  * of_property_read_bool - Find a property
  * @np:                device node from which the property value is to be read.
index 3fff8e774067904bb73b5817ba471099a80555da..2dc1e1697b451ce678781a55776a15c8934be7a5 100644 (file)
@@ -2,6 +2,10 @@
 #define __LINUX_PAGEISOLATION_H
 
 #ifdef CONFIG_MEMORY_ISOLATION
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+       return zone->nr_isolate_pageblock;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
        return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
        return migratetype == MIGRATE_ISOLATE;
 }
 #else
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+       return false;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
        return false;
index 64dacb7288a6fc79e86ca536753f342a37252949..24c7728ca681f997a3a5b10f78ec7af13fd558d3 100644 (file)
@@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
 
        if (pci_is_root_bus(pbus))
                dev = pbus->bridge;
-       else
+       else {
+               /* If pbus is a virtual bus, there is no bridge to it */
+               if (!pbus->self)
+                       return NULL;
+
                dev = &pbus->self->dev;
+       }
 
        return ACPI_HANDLE(dev);
 }
index 5be8db45e368b23eb4c0d7b16b92f9db49290998..4c8ac5fcc224e2ab4c6af62cd01e130b2bf7dbb1 100644 (file)
@@ -331,6 +331,7 @@ struct pci_dev {
        unsigned int    is_added:1;
        unsigned int    is_busmaster:1; /* device is busmaster */
        unsigned int    no_msi:1;       /* device may not use msi */
+       unsigned int    no_64bit_msi:1; /* device may only use 32-bit MSIs */
        unsigned int    block_cfg_access:1;     /* config space access is blocked */
        unsigned int    broken_parity_status:1; /* Device generates false positive parity */
        unsigned int    irq_reroute_variant:2;  /* device needs IRQ rerouting variant */
index d5c89e0dd0e6725c614b491c78b5bfafe9cc46f4..51ce60c35f4c69a6df45d1e104d56d5e47798a78 100644 (file)
@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
        /* paired with smp_store_release() in percpu_ref_reinit() */
        smp_read_barrier_depends();
 
-       if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+       /*
+        * Theoretically, the following could test just ATOMIC; however,
+        * then we'd have to mask off DEAD separately as DEAD may be
+        * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
+        * implies ATOMIC anyway.  Test them together.
+        */
+       if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;
 
        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644 (file)
index a5f045e..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * rcar_du.h  --  R-Car Display Unit DRM driver
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_H__
-#define __RCAR_DU_H__
-
-#include <video/videomode.h>
-
-enum rcar_du_output {
-       RCAR_DU_OUTPUT_DPAD0,
-       RCAR_DU_OUTPUT_DPAD1,
-       RCAR_DU_OUTPUT_LVDS0,
-       RCAR_DU_OUTPUT_LVDS1,
-       RCAR_DU_OUTPUT_TCON,
-       RCAR_DU_OUTPUT_MAX,
-};
-
-enum rcar_du_encoder_type {
-       RCAR_DU_ENCODER_UNUSED = 0,
-       RCAR_DU_ENCODER_NONE,
-       RCAR_DU_ENCODER_VGA,
-       RCAR_DU_ENCODER_LVDS,
-};
-
-struct rcar_du_panel_data {
-       unsigned int width_mm;          /* Panel width in mm */
-       unsigned int height_mm;         /* Panel height in mm */
-       struct videomode mode;
-};
-
-struct rcar_du_connector_lvds_data {
-       struct rcar_du_panel_data panel;
-};
-
-struct rcar_du_connector_vga_data {
-       /* TODO: Add DDC information for EDID retrieval */
-};
-
-/*
- * struct rcar_du_encoder_data - Encoder platform data
- * @type: the encoder type (RCAR_DU_ENCODER_*)
- * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
- * @connector.lvds: platform data for LVDS connectors
- * @connector.vga: platform data for VGA connectors
- *
- * Encoder platform data describes an on-board encoder, its associated DU SoC
- * output, and the connector.
- */
-struct rcar_du_encoder_data {
-       enum rcar_du_encoder_type type;
-       enum rcar_du_output output;
-
-       union {
-               struct rcar_du_connector_lvds_data lvds;
-               struct rcar_du_connector_vga_data vga;
-       } connector;
-};
-
-struct rcar_du_platform_data {
-       struct rcar_du_encoder_data *encoders;
-       unsigned int num_encoders;
-};
-
-#endif /* __RCAR_DU_H__ */
index 73e938b7e9374c68ac00fd99c65247eac9241fd4..2e0e06daf8c0692c16561eb35928b1502647f1ff 100644 (file)
@@ -72,8 +72,10 @@ struct generic_pm_domain {
        bool max_off_time_changed;
        bool cached_power_down_ok;
        struct gpd_cpuidle_data *cpuidle_data;
-       void (*attach_dev)(struct device *dev);
-       void (*detach_dev)(struct device *dev);
+       int (*attach_dev)(struct generic_pm_domain *domain,
+                         struct device *dev);
+       void (*detach_dev)(struct generic_pm_domain *domain,
+                          struct device *dev);
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
        struct notifier_block nb;
        struct mutex lock;
        unsigned int refcount;
-       bool need_restore;
+       int need_restore;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
index 07e7945a1ff292048da586ac9709d17691a29fca..e97fc656a058c07d7c0c79a807cb3e04bec6dbf1 100644 (file)
@@ -253,9 +253,6 @@ struct charger_manager {
        struct device *dev;
        struct charger_desc *desc;
 
-       struct power_supply *fuel_gauge;
-       struct power_supply **charger_stat;
-
 #ifdef CONFIG_THERMAL
        struct thermal_zone_device *tzd_batt;
 #endif
index 3ed0496730229556921e3e77a5deeeecbb20bbac..096dbced02ac4b7cb9d18de6a8aa65e0592d78e9 100644 (file)
@@ -200,6 +200,12 @@ struct power_supply {
        void (*external_power_changed)(struct power_supply *psy);
        void (*set_charged)(struct power_supply *psy);
 
+       /*
+        * Set if thermal zone should not be created for this power supply.
+        * For example, for virtual supplies forwarding calls to actual
+        * sensors or other supplies.
+        */
+       bool no_thermal;
        /* For APM emulation, think legacy userspace. */
        int use_for_apm;
 
index 49a4d6f59108f957d4534190c161344981c48e46..e2c13cd863bdc5b41a65731a4e55018aee3762f4 100644 (file)
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
        __ring_buffer_alloc((size), (flags), &__key);   \
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table);
 
index ec538fc287a66000f7ce357f4bfe8012223acc26..bb9b83640070f4a366a8ca67f56495f07aaa7830 100644 (file)
@@ -256,7 +256,7 @@ struct ucred {
 #define MSG_EOF         MSG_FIN
 
 #define MSG_FASTOPEN   0x20000000      /* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exec for file
                                           descriptor received through
                                           SCM_RIGHTS */
 #if defined(CONFIG_COMPAT)
index d9fa68f26c41c34c33db5f743a4142faf7886792..2a25dec3021166d5aba52ad155e8ca01e0b1570e 100644 (file)
@@ -34,7 +34,6 @@
  * @list: used to maintain a list of currently available transports
  * @name: the human-readable name of the transport
  * @maxsize: transport provided maximum packet size
- * @pref: Preferences of this transport
  * @def: set if this transport should be considered the default
  * @create: member function to create a new connection on this transport
  * @close: member function to discard a connection on this transport
index fe7994c48b75685174134e7817bbb20ef375f890..b2828a06a5a63355f1aa2be27e74a472cc99fc62 100644 (file)
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                         unsigned short type, unsigned char protocol,
                         struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                   int *addr_len);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
index 845c596bf594c5ac297320c1a3dfe8f34cf702b4..3ae969e3acf016474413e1209381c60091befe1a 100644 (file)
@@ -396,14 +396,12 @@ struct nft_rule {
 /**
  *     struct nft_trans - nf_tables object update in transaction
  *
- *     @rcu_head: rcu head to defer release of transaction data
  *     @list: used internally
  *     @msg_type: message type
  *     @ctx: transaction context
  *     @data: internal information related to the transaction
  */
 struct nft_trans {
-       struct rcu_head                 rcu_head;
        struct list_head                list;
        int                             msg_type;
        struct nft_ctx                  ctx;
index a47790bcaa3831b1c2692b3cf27c4306ff39e631..2a50a70ef5870c76e0694ca460182671df46973e 100644 (file)
@@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
        return iptunnel_handle_offloads(skb, udp_csum, type);
 }
 
+static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       struct udphdr *uh;
+
+       uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
+       skb_shinfo(skb)->gso_type |= uh->check ?
+                               SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+}
+
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
index d5f59f3fc35df67141c8234a8741ab5b49501ad1..57cccd0052e58dd124ec997174d8cd9ff885be99 100644 (file)
@@ -8,6 +8,12 @@
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
 
+/* VXLAN protocol header */
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+};
+
 struct vxlan_sock;
 typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
 
@@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
+static inline bool vxlan_gso_check(struct sk_buff *skb)
+{
+       if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+           (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+            skb->inner_protocol != htons(ETH_P_TEB) ||
+            (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+               return false;
+
+       return true;
+}
+
 /* IP header + UDP + VXLAN + Ethernet header */
 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
 /* IPv6 header + UDP + VXLAN + Ethernet header */
index e862497f75568d11cd4deb4f5f5a06712f63d6de..8bb00a27e219e902c9bdc2ac52f0a7c3ed53f005 100644 (file)
@@ -184,6 +184,8 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_DSD_U8                _SNDRV_PCM_FMTBIT(DSD_U8)
 #define SNDRV_PCM_FMTBIT_DSD_U16_LE    _SNDRV_PCM_FMTBIT(DSD_U16_LE)
 #define SNDRV_PCM_FMTBIT_DSD_U32_LE    _SNDRV_PCM_FMTBIT(DSD_U32_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U16_BE    _SNDRV_PCM_FMTBIT(DSD_U16_BE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_BE    _SNDRV_PCM_FMTBIT(DSD_U32_BE)
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define SNDRV_PCM_FMTBIT_S16           SNDRV_PCM_FMTBIT_S16_LE
index 2883a7a6f9f3a932b38843d20a8887986804fc00..98f2ade0266eb9a28ae6340b2908bd8fabea0eca 100644 (file)
@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
        /* state and update */
        enum snd_soc_dpcm_update runtime_update;
        enum snd_soc_dpcm_state state;
+
+       int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
 };
 
 /* can this BE stop and free */
index 94db6a2c3540c1cb5b561c63933ca8c8339be80e..63116362543c7c3d58a207cbd4e19fb1828b9eeb 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/ktime.h>
 #include <linux/tracepoint.h>
 
+struct host1x_bo;
+
 DECLARE_EVENT_CLASS(host1x,
        TP_PROTO(const char *name),
        TP_ARGS(name),
@@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push,
 );
 
 TRACE_EVENT(host1x_cdma_push_gather,
-       TP_PROTO(const char *name, u32 mem_id,
+       TP_PROTO(const char *name, struct host1x_bo *bo,
                        u32 words, u32 offset, void *cmdbuf),
 
-       TP_ARGS(name, mem_id, words, offset, cmdbuf),
+       TP_ARGS(name, bo, words, offset, cmdbuf),
 
        TP_STRUCT__entry(
                __field(const char *, name)
-               __field(u32, mem_id)
+               __field(struct host1x_bo *, bo)
                __field(u32, words)
                __field(u32, offset)
                __field(bool, cmdbuf)
@@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather,
                }
                __entry->cmdbuf = cmdbuf;
                __entry->name = name;
-               __entry->mem_id = mem_id;
+               __entry->bo = bo;
                __entry->words = words;
                __entry->offset = offset;
        ),
 
-       TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
-         __entry->name, __entry->mem_id,
+       TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
+         __entry->name, __entry->bo,
          __entry->words, __entry->offset,
          __print_hex(__get_dynamic_array(cmdbuf),
                  __entry->cmdbuf ? __entry->words * 4 : 0))
@@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min,
 );
 
 TRACE_EVENT(host1x_syncpt_wait_check,
-       TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+       TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
+                u32 min),
 
-       TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+       TP_ARGS(bo, offset, syncpt_id, thresh, min),
 
        TP_STRUCT__entry(
-               __field(void *, mem_id)
+               __field(struct host1x_bo *, bo)
                __field(u32, offset)
                __field(u32, syncpt_id)
                __field(u32, thresh)
@@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check,
        ),
 
        TP_fast_assign(
-               __entry->mem_id = mem_id;
+               __entry->bo = bo;
                __entry->offset = offset;
                __entry->syncpt_id = syncpt_id;
                __entry->thresh = thresh;
                __entry->min = min;
        ),
 
-       TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
-               __entry->mem_id, __entry->offset,
+       TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
+               __entry->bo, __entry->offset,
                __entry->syncpt_id, __entry->thresh,
                __entry->min)
 );
index a0db2d4aa5f0125349adc7ec6516fbfbda63be43..86574b0005ff0a476fa2299b8ef73de418670024 100644 (file)
@@ -286,6 +286,8 @@ struct drm_mode_get_property {
        char name[DRM_PROP_NAME_LEN];
 
        __u32 count_values;
+       /* This is only used to count enum values, not blobs. The _blobs
+        * suffix is purely historical, kept for backwards compatibility. */
        __u32 count_enum_blobs;
 };
 
index ff57f07c32498933be7be28d874fb09a164f5b04..250262265ee31c3099b00850a29c6729889dcee9 100644 (file)
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
 #define I915_PARAM_HAS_WT               27
 #define I915_PARAM_CMD_PARSER_VERSION   28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
 
 typedef struct drm_i915_getparam {
        int param;
@@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling {
         * mmap mapping.
         */
        __u32 swizzle_mode;
+
+       /**
+        * Returned address bit 6 swizzling required for CPU access through
+        * mmap mapping whilst bound.
+        */
+       __u32 phys_swizzle_mode;
 };
 
 struct drm_i915_gem_get_aperture {
index b70237e8bc37b49bb9905669fb5d109ed17300fb..8523f9bb72f2d0039963078d765084ee5dfdf155 100644 (file)
@@ -125,6 +125,7 @@ header-y += filter.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
+header-y += fou.h
 header-y += fs.h
 header-y += fsl_hypervisor.h
 header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
 header-y += hiddev.h
 header-y += hidraw.h
 header-y += hpet.h
+header-y += hsr_netlink.h
 header-y += hyperv.h
 header-y += hysdn_if.h
 header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
 header-y += minix_fs.h
 header-y += mman.h
 header-y += mmtimer.h
+header-y += mpls.h
 header-y += mqueue.h
 header-y += mroute.h
 header-y += mroute6.h
@@ -424,6 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
+header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
index 39f621a9fe826cb9a0eab487cd36a51aed5440ab..da17e456908d2d16d1500499858296a042b3186c 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/if_ether.h>
+#include <linux/in6.h>
 
 #define SYSFS_BRIDGE_ATTR      "bridge"
 #define SYSFS_BRIDGE_FDB       "brforward"
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
new file mode 100644 (file)
index 0000000..7acef41
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_IOCTL_H_INCLUDED
+#define KFD_IOCTL_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define KFD_IOCTL_MAJOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 0
+
+struct kfd_ioctl_get_version_args {
+       uint32_t major_version; /* from KFD */
+       uint32_t minor_version; /* from KFD */
+};
+
+/* For kfd_ioctl_create_queue_args.queue_type. */
+#define KFD_IOC_QUEUE_TYPE_COMPUTE     0
+#define KFD_IOC_QUEUE_TYPE_SDMA                1
+#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
+
+#define KFD_MAX_QUEUE_PERCENTAGE       100
+#define KFD_MAX_QUEUE_PRIORITY         15
+
+struct kfd_ioctl_create_queue_args {
+       uint64_t ring_base_address;     /* to KFD */
+       uint64_t write_pointer_address; /* from KFD */
+       uint64_t read_pointer_address;  /* from KFD */
+       uint64_t doorbell_offset;       /* from KFD */
+
+       uint32_t ring_size;             /* to KFD */
+       uint32_t gpu_id;                /* to KFD */
+       uint32_t queue_type;            /* to KFD */
+       uint32_t queue_percentage;      /* to KFD */
+       uint32_t queue_priority;        /* to KFD */
+       uint32_t queue_id;              /* from KFD */
+
+       uint64_t eop_buffer_address;    /* to KFD */
+       uint64_t eop_buffer_size;       /* to KFD */
+       uint64_t ctx_save_restore_address; /* to KFD */
+       uint64_t ctx_save_restore_size; /* to KFD */
+};
+
+struct kfd_ioctl_destroy_queue_args {
+       uint32_t queue_id;              /* to KFD */
+       uint32_t pad;
+};
+
+struct kfd_ioctl_update_queue_args {
+       uint64_t ring_base_address;     /* to KFD */
+
+       uint32_t queue_id;              /* to KFD */
+       uint32_t ring_size;             /* to KFD */
+       uint32_t queue_percentage;      /* to KFD */
+       uint32_t queue_priority;        /* to KFD */
+};
+
+/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
+#define KFD_IOC_CACHE_POLICY_COHERENT 0
+#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
+
+struct kfd_ioctl_set_memory_policy_args {
+       uint64_t alternate_aperture_base;       /* to KFD */
+       uint64_t alternate_aperture_size;       /* to KFD */
+
+       uint32_t gpu_id;                        /* to KFD */
+       uint32_t default_policy;                /* to KFD */
+       uint32_t alternate_policy;              /* to KFD */
+       uint32_t pad;
+};
+
+/*
+ * All counters are monotonic. They are used for profiling of compute jobs.
+ * The profiling is done by userspace.
+ *
+ * In case of GPU reset, the counters should not be affected.
+ */
+
+struct kfd_ioctl_get_clock_counters_args {
+       uint64_t gpu_clock_counter;     /* from KFD */
+       uint64_t cpu_clock_counter;     /* from KFD */
+       uint64_t system_clock_counter;  /* from KFD */
+       uint64_t system_clock_freq;     /* from KFD */
+
+       uint32_t gpu_id;                /* to KFD */
+       uint32_t pad;
+};
+
+#define NUM_OF_SUPPORTED_GPUS 7
+
+struct kfd_process_device_apertures {
+       uint64_t lds_base;              /* from KFD */
+       uint64_t lds_limit;             /* from KFD */
+       uint64_t scratch_base;          /* from KFD */
+       uint64_t scratch_limit;         /* from KFD */
+       uint64_t gpuvm_base;            /* from KFD */
+       uint64_t gpuvm_limit;           /* from KFD */
+       uint32_t gpu_id;                /* from KFD */
+       uint32_t pad;
+};
+
+struct kfd_ioctl_get_process_apertures_args {
+       struct kfd_process_device_apertures
+                       process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
+
+       /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
+       uint32_t num_of_nodes;
+       uint32_t pad;
+};
+
+#define KFD_IOC_MAGIC 'K'
+
+#define KFD_IOC_GET_VERSION \
+               _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+
+#define KFD_IOC_CREATE_QUEUE \
+               _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+
+#define KFD_IOC_DESTROY_QUEUE \
+       _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+
+#define KFD_IOC_SET_MEMORY_POLICY \
+       _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+
+#define KFD_IOC_GET_CLOCK_COUNTERS \
+       _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+
+#define KFD_IOC_GET_PROCESS_APERTURES \
+       _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+
+#define KFD_IOC_UPDATE_QUEUE \
+       _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+
+#endif
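
A minimal userspace consumer of this new interface might look as follows; the /dev/kfd node name matches the amdkfd driver this header belongs to, and error handling is kept to the essentials:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kfd_ioctl.h>    /* the header added above */

    int main(void)
    {
            struct kfd_ioctl_get_version_args args = {0};
            int fd = open("/dev/kfd", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/kfd");
                    return 1;
            }
            if (ioctl(fd, KFD_IOC_GET_VERSION, &args) < 0) {
                    perror("KFD_IOC_GET_VERSION");
                    close(fd);
                    return 1;
            }
            printf("KFD interface %u.%u\n",
                   args.major_version, args.minor_version);
            close(fd);
            return 0;
    }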
index 6ee586728df97a0fc335a3c314e0828ac970023f..941d32f007dc250afb73a8045044d68b2646eecb 100644 (file)
@@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_DSD_U8         ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
 #define        SNDRV_PCM_FORMAT_DSD_U16_LE     ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
 #define        SNDRV_PCM_FORMAT_DSD_U32_LE     ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
-#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_LE
+#define        SNDRV_PCM_FORMAT_DSD_U16_BE     ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
+#define        SNDRV_PCM_FORMAT_DSD_U32_BE     ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
+#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_BE
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define        SNDRV_PCM_FORMAT_S16            SNDRV_PCM_FORMAT_S16_LE
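
Userspace reaches these formats through alsa-lib, whose SND_PCM_FORMAT_DSD_* names mirror the kernel definitions. A hedged capability probe, assuming an alsa-lib build recent enough to know the big-endian DSD formats:

    #include <alsa/asoundlib.h>

    static int supports_dsd_u32_be(snd_pcm_t *pcm, snd_pcm_hw_params_t *hw)
    {
            /* 0 means the driver accepts the format with these hw params */
            return snd_pcm_hw_params_test_format(pcm, hw,
                                                 SND_PCM_FORMAT_DSD_U32_BE) == 0;
    }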
index 800a0daede7e4d8b8940288bd30dfb43a6e922a8..321d0ceb26d3782ed6871feef4c52090e9872b23 100644 (file)
@@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void)
                                  static_command_line, __start___param,
                                  __stop___param - __start___param,
                                  -1, -1, &unknown_bootoption);
-       if (after_dashes)
+       if (!IS_ERR_OR_NULL(after_dashes))
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           set_init_arg);
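
The fix matters because parse_args() can return three distinct things: NULL (no "--" present), a valid pointer into the command line, or an error encoded as ERR_PTR(-E...). The old "if (after_dashes)" test treated an encoded error as a usable string. A minimal sketch of the convention, with an illustrative helper that is not from the tree:

    #include <linux/err.h>
    #include <linux/string.h>

    /* Illustrative helper: returns NULL when the key is simply absent,
     * or an ERR_PTR()-encoded error on a hard failure. */
    static char *demo_find_after(char *cmdline, const char *key)
    {
            if (!cmdline)
                    return ERR_PTR(-EINVAL);  /* error encoded in the pointer */
            return strstr(cmdline, key);      /* may be NULL: not found */
    }

    /* Callers must distinguish all three outcomes, as the fix above does:
     *
     *      char *p = demo_find_after(line, " -- ");
     *      if (!IS_ERR_OR_NULL(p))
     *              use(p);
     */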
 
index 454f6c6020a8d98dccb167e46d3a5225d5f4ce2d..53c3310f41c6867fd4b5f3f0493a150184b8c617 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
                return retval;
        }
 
-       id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-       if (id < 0) {
-               ipc_rcu_putref(sma, sem_rcu_free);
-               return id;
-       }
-       ns->used_sems += nsems;
-
        sma->sem_base = (struct sem *) &sma[1];
 
        for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();
+
+       id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+       if (id < 0) {
+               ipc_rcu_putref(sma, sem_rcu_free);
+               return id;
+       }
+       ns->used_sems += nsems;
+
        sem_unlock(sma, -1);
        rcu_read_unlock();
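
The reordering enforces the usual publish-after-init rule: ipc_addid() makes the semaphore set findable by id, so every field must be initialized before it runs, or a concurrent lookup can observe half-built state. The rule in miniature, with hypothetical demo_* names:

    struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
            return -ENOMEM;
    demo_init_fields(obj);          /* initialize everything first ...   */
    id = demo_publish_id(obj);      /* ... only then make it findable    */
    if (id < 0) {
            kfree(obj);
            return id;
    }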
 
index 80983df92cd441f981090394252126fbde22cd64..cebb11db4d342642efccdc6fdebdea638353324c 100644 (file)
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
        audit_log_task_info(ab, current);
-       audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
+       audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
                         audit_feature_names[which], !!old_feature, !!new_feature,
                         !!old_lock, !!new_lock, res);
        audit_log_end(ab);
index e242e3a9864ad6032e3d32704aa9b95550036b8a..80f29e0155705159fc83a80c1138e45b590661e2 100644 (file)
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+       chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
 }
 
index 2b02c9fda790d667c29c304bca011901a4e30189..1cd5eef1fcddf3f53149522b0a482b7989e5a7a8 100644 (file)
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
        if (!task) {
                /*
-                * Per cpu events are removed via an smp call and
-                * the removal is always successful.
+                * Per cpu events are removed via an smp call. The removal can
+                * fail if the CPU is currently offline, but in that case we
+                * already called __perf_remove_from_context from
+                * perf_event_exit_cpu.
                 */
                cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-       struct remove_event re = { .detach_group = false };
+       struct remove_event re = { .detach_group = true };
        struct perf_event_context *ctx = __info;
 
        perf_pmu_rotate_stop(ctx->pmu);
index 1d0af8a2c6469bda46438dbd8383cbf535d65077..d2a5689a6b2ec02248a5d4bafb0f5eac3043fd59 100644 (file)
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        }
 
        flush_cache_page(vma, addr, pte_pfn(*ptep));
-       ptep_clear_flush(vma, addr, ptep);
+       ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
        page_remove_rmap(page);
@@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void)
                if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
                        utask->state = UTASK_SSTEP_TRAPPED;
                        set_tsk_thread_flag(t, TIF_UPROBE);
-                       set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
                }
        }
 
index d09dc5c32c6740e41a5987cca0252ad86b43bdbd..cf80672b79246dd439f64cea16d3e631e7d35f2f 100644 (file)
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
  *  'I' - Working around severe firmware bug.
  *  'O' - Out-of-tree module has been loaded.
  *  'E' - Unsigned module has been loaded.
+ *  'L' - A soft lockup has previously occurred.
  *
  *     The string is overwritten by the next call to print_tainted().
  */
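
The new 'L' flag lands in the taint word exposed through /proc/sys/kernel/tainted. A hedged userspace sketch that decodes it; TAINT_SOFTLOCKUP is bit 14 in this kernel, but verify the constant against your tree before relying on it:

    #include <stdio.h>

    int main(void)
    {
            unsigned long taint = 0;
            FILE *f = fopen("/proc/sys/kernel/tainted", "r");

            if (!f || fscanf(f, "%lu", &taint) != 1)
                    return 1;
            fclose(f);
            printf("soft lockup taint ('L'): %s\n",
                   (taint & (1UL << 14)) ? "set" : "clear");
            return 0;
    }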
index 4ca9a33ff62020e63d15219ce9f097611ebf6507..c347e3ce3a55df9efe054caaffd08bed4bb9e886 100644 (file)
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-       return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+       return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
                freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-       if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+       if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
                freeze_ops->restore();
 }
 
index 240157c13ddc88508a4aafa87d8350394b38e94b..89e7283015a61ae1806566c3bea08b87c6a3a35d 100644 (file)
@@ -2474,44 +2474,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-       u64 ns = 0;
-
-       /*
-        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-        * project cycles that may never be accounted to this
-        * thread, breaking clock_gettime().
-        */
-       if (task_current(rq, p) && task_on_rq_queued(p)) {
-               update_rq_clock(rq);
-               ns = rq_clock_task(rq) - p->se.exec_start;
-               if ((s64)ns < 0)
-                       ns = 0;
-       }
-
-       return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
        unsigned long flags;
        struct rq *rq;
-       u64 ns = 0;
+       u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
        /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
        rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
+               update_rq_clock(rq);
+               p->sched_class->update_curr(rq);
+       }
+       ns = p->se.sum_exec_runtime;
        task_rq_unlock(rq, p, &flags);
 
        return ns;
@@ -2903,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
         * or we have been woken up remotely but the IPI has not yet arrived,
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
+        *
+        * NB: There are buggy callers of this function.  Ideally we
+        * should warn if prev_state != IN_USER, but that will trigger
+        * too frequently to make sense yet.
         */
-       user_exit();
+       enum ctx_state prev_state = exception_enter();
        schedule();
-       user_enter();
+       exception_exit(prev_state);
 }
 #endif
 
@@ -6368,6 +6343,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+       if (!level)
+               return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
@@ -7444,8 +7423,12 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                put_prev_task(rq, tsk);
 
-       tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                               lockdep_is_held(&tsk->sighand->siglock)),
+       /*
+        * All callers are synchronized by task_rq_lock(), so holding the
+        * RCU read lock here would be pointless. Thus, we pass "true" to
+        * task_css_check() to prevent lockdep warnings.
+        */
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
index 5285332392d5b599788a90faa37615ccddc386d5..28fa9d9e92012a9245b2e95ce85bf301d320f8cc 100644 (file)
@@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = {
        .prio_changed           = prio_changed_dl,
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,
+
+       .update_curr            = update_curr_dl,
 };
index 34baa60f8a7bd11f03ccb1f754a08eb1029c46be..ef2b104b254cb8c60d954a92e62f0f8a626ed024 100644 (file)
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
        account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+       update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1179,6 +1184,13 @@ static void task_numa_compare(struct task_numa_env *env,
                cur = NULL;
        raw_spin_unlock_irq(&dst_rq->lock);
 
+       /*
+        * Because we have preemption enabled we can get migrated around and
+        * end up trying to select ourselves (current == env->p) as a swap
+        * candidate.
+        */
+       if (cur == env->p)
+               goto unlock;
+
        /*
         * "imp" is the fault differential for the source task between the
         * source and destination node. Calculate the total differential for
@@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
        .get_rr_interval        = get_rr_interval_fair,
 
+       .update_curr            = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        .task_move_group        = task_move_group_fair,
 #endif
index 67ad4e7f506a2509a0493138662c0fec7dc4d7fa..c65dac8c97cdd5dd72f636455dcecc2742d9706d 100644 (file)
@@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
        return 0;
 }
 
+static void update_curr_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = {
 
        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,
+       .update_curr            = update_curr_idle,
 };
index d024e6ce30baf50037eac7c256dacc5bf27856f4..20bca398084ae770b36945e9aef238d954a6796e 100644 (file)
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
+
+       .update_curr            = update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
index 24156c8434d1ebbe679a3fa6df45725aa7db3647..2df8ef067cc54ddd7a25c0b1cf45267214a2c31c 100644 (file)
@@ -1135,6 +1135,8 @@ struct sched_class {
        unsigned int (*get_rr_interval) (struct rq *rq,
                                         struct task_struct *task);
 
+       void (*update_curr) (struct rq *rq);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
index 67426e529f59c044eef35c88c8d906cab641bb64..79ffec45a6acd9415d31d90906b529a6bc0d752b 100644 (file)
@@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
        return 0;
 }
 
+static void update_curr_stop(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
@@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = {
 
        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
+       .update_curr            = update_curr_stop,
 };
index 492b986195d53350abe899b0ecd059f91edb77a4..a16b67859e2a79929331c5008f96fd7f29ebbfc1 100644 (file)
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
-               *sample = cputime.sum_exec_runtime + task_delta_exec(p);
+               *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
index 2d75c94ae87d871bbf42db7b1ee949463bb7f65f..a56e07c8d15b8b730eb54f2020a018ff440eec6c 100644 (file)
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
+       int ret = 0;
 
        /*
         * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
        }
 
 
-       prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+       while (true) {
+               prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-       /*
-        * The events can happen in critical sections where
-        * checking a work queue can cause deadlocks.
-        * After adding a task to the queue, this flag is set
-        * only to notify events to try to wake up the queue
-        * using irq_work.
-        *
-        * We don't clear it even if the buffer is no longer
-        * empty. The flag only causes the next event to run
-        * irq_work to do the work queue wake up. The worse
-        * that can happen if we race with !trace_empty() is that
-        * an event will cause an irq_work to try to wake up
-        * an empty queue.
-        *
-        * There's no reason to protect this flag either, as
-        * the work queue and irq_work logic will do the necessary
-        * synchronization for the wake ups. The only thing
-        * that is necessary is that the wake up happens after
-        * a task has been queued. It's OK for spurious wake ups.
-        */
-       work->waiters_pending = true;
+               /*
+                * The events can happen in critical sections where
+                * checking a work queue can cause deadlocks.
+                * After adding a task to the queue, this flag is set
+                * only to notify events to try to wake up the queue
+                * using irq_work.
+                *
+                * We don't clear it even if the buffer is no longer
+                * empty. The flag only causes the next event to run
+                * irq_work to do the work queue wake up. The worst
+                * that can happen if we race with !trace_empty() is that
+                * an event will cause an irq_work to try to wake up
+                * an empty queue.
+                *
+                * There's no reason to protect this flag either, as
+                * the work queue and irq_work logic will do the necessary
+                * synchronization for the wake ups. The only thing
+                * that is necessary is that the wake up happens after
+                * a task has been queued. It's OK for spurious wake ups.
+                */
+               work->waiters_pending = true;
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+                       break;
+
+               if (cpu != RING_BUFFER_ALL_CPUS &&
+                   !ring_buffer_empty_cpu(buffer, cpu)) {
+                       unsigned long flags;
+                       bool pagebusy;
+
+                       if (!full)
+                               break;
+
+                       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+                       pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+                       if (!pagebusy)
+                               break;
+               }
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
                schedule();
+       }
 
        finish_wait(&work->waiters, &wait);
-       return 0;
+
+       return ret;
 }
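
Callers now choose between waking on any data (full == false) and waking only once a whole page has been committed (full == true), and must handle -EINTR themselves. A hedged sketch of an in-kernel consumer; rb, cpu and demo_consume() are assumed to be set up elsewhere:

    for (;;) {
            int ret = ring_buffer_wait(rb, cpu, true); /* block for a full page */

            if (ret == -EINTR)
                    break;                  /* a signal woke the waiting task */
            demo_consume(rb, cpu);          /* hypothetical page-granular reader */
    }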
 
 /**
index 8a528392b1f465da19d297a84699c4931102cf3d..92f4a6cee1727360ff9d739a126f168a6cacd980 100644 (file)
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
 
-       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+                               full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
                mutex_unlock(&iter->mutex);
 
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, false);
 
                mutex_lock(&iter->mutex);
 
                if (ret)
                        return ret;
-
-               if (signal_pending(current))
-                       return -EINTR;
        }
 
        return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                                goto out_unlock;
                        }
                        mutex_unlock(&trace_types_lock);
-                       ret = wait_on_pipe(iter);
+                       ret = wait_on_pipe(iter, false);
                        mutex_lock(&trace_types_lock);
                        if (ret) {
                                size = ret;
                                goto out_unlock;
                        }
-                       if (signal_pending(current)) {
-                               size = -EINTR;
-                               goto out_unlock;
-                       }
                        goto again;
                }
                size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        };
        struct buffer_ref *ref;
        int entries, size, i;
-       ssize_t ret;
+       ssize_t ret = 0;
 
        mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                int r;
 
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-               if (!ref)
+               if (!ref) {
+                       ret = -ENOMEM;
                        break;
+               }
 
                ref->ref = 1;
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (!ref->page) {
+                       ret = -ENOMEM;
                        kfree(ref);
                        break;
                }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
        /* did we read anything? */
        if (!spd.nr_pages) {
+               if (ret)
+                       goto out;
+
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&trace_types_lock);
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, true);
                mutex_lock(&trace_types_lock);
                if (ret)
                        goto out;
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       goto out;
-               }
+
                goto again;
        }
 
index 7512dc978f1872fe4a395868ed0f3cbd30de2ad0..0211d2bd5e17551856da7ca97aea0849528b2ead 100644 (file)
@@ -10,7 +10,7 @@ endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o timerqueue.o\
         idr.o int_sqrt.o extable.o \
-        sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
+        sha1.o md5.o irq_regs.o argv_split.o \
         proportions.o flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-        percpu-refcount.o percpu_ida.o hash.o rhashtable.o
+        percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
index cce4dd68c40da211948177f6903917d4ac81b176..2e65d206b01c13d3ad02a57c3d0842b89c8637bd 100644 (file)
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
        return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
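
Exporting the symbol lets modular drivers create managed pools. A hedged probe-time sketch; the demo_* names are illustrative:

    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct gen_pool *pool;

            /* 64-byte minimum allocation granularity, pool on the local node */
            pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
                                        dev_to_node(&pdev->dev));
            if (!pool)
                    return -ENOMEM;

            /* gen_pool_add() backing regions and gen_pool_alloc() as usual;
             * the pool is torn down automatically when the device goes away. */
            return 0;
    }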
index 081be3ba9ea8285b05374e6bb98299af541f567c..624a0b7c05ef15934cee4a2574852f74620878a9 100644 (file)
@@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
        ht->shift++;
 
        /* For each new bucket, search the corresponding old bucket
-        * for the rst entry that hashes to the new bucket, and
+        * for the first entry that hashes to the new bucket, and
         * link the new bucket to that entry. Since all the entries
         * which will end up in the new bucket appear in the same
         * old bucket, this constructs an entirely valid new hash
@@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
        }
 
        /* Publish the new table pointer. Lookups may now traverse
-        * the new table, but they will not benet from any
-        * additional efciency until later steps unzip the buckets.
+        * the new table, but they will not benefit from any
+        * additional efficiency until later steps unzip the buckets.
         */
        rcu_assign_pointer(ht->tbl, new_tbl);
 
@@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
        ht->shift--;
 
-       /* Link each bucket in the new table to the rst bucket
+       /* Link each bucket in the new table to the first bucket
         * in the old table that contains entries which will hash
         * to the new bucket.
         */
        for (i = 0; i < ntbl->size; i++) {
                ntbl->buckets[i] = tbl->buckets[i];
 
-               /* Link each bucket in the new table to the rst bucket
+               /* Link each bucket in the new table to the first bucket
+               /* Link each bucket in the new table to the first bucket
                 * in the old table that contains entries which will hash
                 * to the new bucket.
                 */
index 09225796991a83a9281194d855719021b8b18282..5e256271b47b02b1a0265ad6a89b2bccf5c40002 100644 (file)
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
                                continue;
 
                        total += zone->present_pages;
-                       reserved = zone->present_pages - zone->managed_pages;
+                       reserved += zone->present_pages - zone->managed_pages;
 
                        if (is_highmem_idx(zoneid))
                                highmem += zone->present_pages;
index 8a000cebb0d7428d5ec48dcfa979086c57e85109..477be696511d669230b47c73d52a8b3c1836c457 100644 (file)
@@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       if (reset_managed_pages_done)
-               return;
-
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
 }
@@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void)
 {
        struct pglist_data *pgdat;
 
+       if (reset_managed_pages_done)
+               return;
+
        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
+
        reset_managed_pages_done = 1;
 }
 
index 963bc4add9af88838b494e6dd56b2bf2239548a2..fde706e1284fbc05edd04722fd8f0abf92d406bf 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
 
 err:
        kfree(cma->bitmap);
+       cma->count = 0;
        return -EINVAL;
 }
 
@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
        phys_addr_t highmem_start = __pa(high_memory);
        int ret = 0;
 
-       pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
-               __func__, (unsigned long)size, (unsigned long)base,
-               (unsigned long)limit, (unsigned long)alignment);
+       pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+               __func__, &size, &base, &limit, &alignment);
 
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
 
+       if (!base)
+               fixed = false;
+
        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;
 
        /*
-        * adjust limit to avoid crossing low/high memory boundary for
-        * automatically allocated regions
+        * If allocating at a fixed base, the requested region must not
+        * cross the low/high memory boundary.
         */
-       if (((limit == 0 || limit > memblock_end) &&
-            (memblock_end - size < highmem_start &&
-             memblock_end > highmem_start)) ||
-           (!fixed && limit > highmem_start && limit - size < highmem_start)) {
-               limit = highmem_start;
-       }
-
-       if (fixed && base < highmem_start && base+size > highmem_start) {
+       if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
-               pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
-                       (unsigned long)base, (unsigned long)highmem_start);
+               pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+                       &base, &highmem_start);
                goto err;
        }
 
+       /*
+        * If the limit is unspecified or above the memblock end, its effective
+        * value will be the memblock end. Set it explicitly to simplify further
+        * checks.
+        */
+       if (limit == 0 || limit > memblock_end)
+               limit = memblock_end;
+
        /* Reserve memory */
-       if (base && fixed) {
+       if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
-               phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-                                                       limit);
+               phys_addr_t addr = 0;
+
+               /*
+                * All pages in the reserved area must come from the same zone.
+                * If the requested region crosses the low/high memory boundary,
+                * try allocating from high memory first and fall back to low
+                * memory in case of failure.
+                */
+               if (base < highmem_start && limit > highmem_start) {
+                       addr = memblock_alloc_range(size, alignment,
+                                                   highmem_start, limit);
+                       limit = highmem_start;
+               }
+
                if (!addr) {
-                       ret = -ENOMEM;
-                       goto err;
-               } else {
-                       base = addr;
+                       addr = memblock_alloc_range(size, alignment, base,
+                                                   limit);
+                       if (!addr) {
+                               ret = -ENOMEM;
+                               goto err;
+                       }
                }
+
+               base = addr;
        }
 
        ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
        if (ret)
                goto err;
 
-       pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-               (unsigned long)base);
+       pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+               &base);
        return 0;
 
 err:
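
With the reworked logic, a limit of 0 means "up to the memblock end" and a zero base with fixed == false lets memblock choose the placement. A hedged sketch of an early-boot caller; the demo_* names are illustrative:

    #include <linux/cma.h>
    #include <linux/sizes.h>

    static struct cma *demo_cma;

    static void __init demo_reserve_cma(void)
    {
            int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
                                             &demo_cma);

            if (ret)
                    pr_warn("demo: CMA reservation failed: %d\n", ret);
    }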
index ec74cf0123efd3944894cc0b159b385d6b837f25..f9792ba3537ccc830594e7954715ca66eb2e9654 100644 (file)
@@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc,
 
                block_end_pfn = min(block_end_pfn, end_pfn);
 
+               /*
+                * pfn can pass block_end_pfn when the isolated freepage is
+                * larger than a pageblock. In that case, advance the scanning
+                * range to the pageblock boundary following pfn.
+                */
+               if (pfn >= block_end_pfn) {
+                       block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+                       block_end_pfn = min(block_end_pfn, end_pfn);
+               }
+
                if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
                        break;
 
@@ -1029,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        }
 
        acct_isolated(zone, cc);
-       /* Record where migration scanner will be restarted */
-       cc->migrate_pfn = low_pfn;
+       /*
+        * Record where migration scanner will be restarted. If we end up in
+        * the same pageblock as the free scanner, make the scanners fully
+        * meet so that compact_finished() terminates compaction.
+        */
+       cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
 
        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
index 72b8fa36143328f728a44de1b5cd53b629653a0a..9129013732d72b323251ab680b0db39f2961aba9 100644 (file)
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (pte_present(pte)) {
                flush_cache_page(vma, addr, pte_pfn(pte));
-               pte = ptep_clear_flush(vma, addr, ptep);
+               pte = ptep_clear_flush_notify(vma, addr, ptep);
                page = vm_normal_page(vma, addr, pte);
                if (page) {
                        if (pte_dirty(pte))
index c30eec536f03fb7148e3c7a08538f6a2c3571857..f2a3571c6e22573867cf6e4ba2d481a663a3d04e 100644 (file)
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
                  the (older) page from frontswap
                 */
                inc_frontswap_failed_stores();
-               if (dup)
+               if (dup) {
                        __frontswap_clear(sis, offset);
+                       frontswap_ops->invalidate_page(type, offset);
+               }
        }
        if (frontswap_writethrough_enabled)
                /* report failure so swap also writes to swap device */
index de984159cf0b8a0be6a73b9f421e9a0b4f75c599..1d89526ed53196576cfbf2f00f220eebe3f1511a 100644 (file)
@@ -1036,7 +1036,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                goto out_free_pages;
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
-       pmdp_clear_flush(vma, haddr, pmd);
+       pmdp_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1179,7 +1179,7 @@ alloc:
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               pmdp_clear_flush(vma, haddr, pmd);
+               pmdp_clear_flush_notify(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                mem_cgroup_commit_charge(new_page, memcg, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1512,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                pmd_t entry;
                ret = 1;
                if (!prot_numa) {
-                       entry = pmdp_get_and_clear(mm, addr, pmd);
+                       entry = pmdp_get_and_clear_notify(mm, addr, pmd);
                        if (pmd_numa(entry))
                                entry = pmd_mknonnuma(entry);
                        entry = pmd_modify(entry, newprot);
@@ -1644,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush(vma, address, pmd);
+
                ret = 1;
                spin_unlock(ptl);
        }
@@ -2834,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
        pmd_t _pmd;
        int i;
 
-       pmdp_clear_flush(vma, haddr, pmd);
+       pmdp_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
index 9fd722769927f9e5bb5e03fb6516db7a5f7c8f42..2e6add04fa1b37bd451f8bbd89e6cec6ab26f614 100644 (file)
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        }
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                } else {
-                       if (cow)
+                       if (cow) {
                                huge_ptep_set_wrprotect(src, addr, src_pte);
+                               mmu_notifier_invalidate_range(src, mmun_start,
+                                                                  mmun_end);
+                       }
                        entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
@@ -2899,6 +2902,7 @@ retry_avoidcopy:
 
                /* Break COW */
                huge_ptep_clear_flush(vma, address, ptep);
+               mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page);
@@ -3374,6 +3378,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * and that page table be reused and filled with junk.
         */
        flush_tlb_range(vma, start, end);
+       mmu_notifier_invalidate_range(mm, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
        mmu_notifier_invalidate_range_end(mm, start, end);
 
index 829304090b90e8ff57ee3eaf5281987deccb7e55..a4f90ba7068ef0af12ccdff8b3dc7408f772b447 100644 (file)
@@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 /*
  * in mm/page_alloc.c
  */
+
+/*
+ * Locate the struct page for both the matching buddy in our
+ * pair (buddy1) and the combined O(n+1) page they form (page).
+ *
+ * 1) Any buddy B1 will have an order O twin B2 which satisfies
+ * the following equation:
+ *     B2 = B1 ^ (1 << O)
+ * For example, if the starting buddy (buddy2) is #8 its order
+ * 1 buddy is #10:
+ *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
+ *
+ * 2) Any buddy B will have an order O+1 parent P which
+ * satisfies the following equation:
+ *     P = B & ~(1 << O)
+ *
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ */
+static inline unsigned long
+__find_buddy_index(unsigned long page_idx, unsigned int order)
+{
+       return page_idx ^ (1 << order);
+}
+
+extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
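
The XOR trick in __find_buddy_index() is easy to verify outside the kernel; this standalone program reproduces the example from the comment (#8's order-1 buddy is #10):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_idx = 8, order = 1;
            unsigned long buddy_idx = page_idx ^ (1UL << order);  /* 8 ^ 2 = 10 */
            unsigned long parent = page_idx & ~(1UL << order);    /* 8 & ~2 = 8 */

            printf("order-%lu buddy of #%lu is #%lu, parent is #%lu\n",
                   order, page_idx, buddy_idx, parent);
            return 0;
    }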
index eafcf60f6b832b202a48f8a9ba66ac0d87989443..e34a3cb6aad6cb078c0801efc6891ba83f5db155 100644 (file)
@@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
-               return min(i->count, i->iov->iov_len - i->iov_offset);
-       else
                return min(i->count, i->bvec->bv_len - i->iov_offset);
+       else
+               return min(i->count, i->iov->iov_len - i->iov_offset);
 }
 EXPORT_SYMBOL(iov_iter_single_seg_count);
 
index 6b2e337bc03c7d6c5fce5e33023bdb5d2f111789..d247efab5073abfaeb9c11e33fb87a2fbb8ebdb0 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                 * this assure us that no O_DIRECT can happen after the check
                 * or in the middle of the check.
                 */
-               entry = ptep_clear_flush(vma, addr, ptep);
+               entry = ptep_clear_flush_notify(vma, addr, ptep);
                /*
                 * Check that no O_DIRECT or similar I/O is in progress on the
                 * page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        page_add_anon_rmap(kpage, vma, addr);
 
        flush_cache_page(vma, addr, pte_pfn(*ptep));
-       ptep_clear_flush(vma, addr, ptep);
+       ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
        page_remove_rmap(page);
index 3e503831e042a6aa7b96d2608ecb570dfffa0aa7..d3cb2ef66ee21325e8d4230efe18d2f2fbef1314 100644 (file)
@@ -238,6 +238,7 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        tlb->need_flush = 0;
        tlb_flush(tlb);
+       mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
@@ -815,20 +816,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_migration_entry(entry)) {
+                       } else if (is_migration_entry(entry)) {
                                page = migration_entry_to_page(entry);
 
                                if (PageAnon(page))
@@ -2234,7 +2235,7 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
                page_add_new_anon_rmap(new_page, vma, address);
                mem_cgroup_commit_charge(new_page, memcg, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
index 252e1dbbed86e9a81011ac8d135d9580969a1141..1bf4807cb21e49ccbd1bb4232574507e8d41384c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/stop_machine.h>
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -1066,6 +1067,16 @@ out:
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
+static void reset_node_present_pages(pg_data_t *pgdat)
+{
+       struct zone *z;
+
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               z->present_pages = 0;
+
+       pgdat->node_present_pages = 0;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);
 
+       /*
+        * zone->managed_pages is set to an approximate value in
+        * free_area_init_core(), which would cause
+        * /sys/device/system/node/nodeX/meminfo to report wrong data.
+        * So reset it to 0 before any memory is onlined.
+        */
+       reset_node_managed_pages(pgdat);
+
+       /*
+        * When memory is hot-added, all the memory is in offline state. So
+        * clear all zones' present_pages because they will be updated in
+        * online_pages() and offline_pages().
+        */
+       reset_node_present_pages(pgdat);
+
        return pgdat;
 }
 
index 01439953abf548690ac17f1b994511b587e9326a..41945cb0ca38d318d88a4361ae01ddf2dea4a7b1 100644 (file)
@@ -1854,7 +1854,7 @@ fail_putback:
         */
        flush_cache_range(vma, mmun_start, mmun_end);
        page_add_anon_rmap(new_page, vma, mmun_start);
-       pmdp_clear_flush(vma, mmun_start, pmd);
+       pmdp_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
        flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
@@ -1862,6 +1862,7 @@ fail_putback:
        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
                flush_tlb_range(vma, mmun_start, mmun_end);
+               mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page);
                goto fail_putback;
index 87e82b38453c2cbca83f1dd7ad472c02b6a73b77..ae919891a087e0d7f3a76c5b47f4f1d8326f6dfa 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again:                     remove_next = 1 + (end > next->vm_end);
                 * shrinking vma had, to cover any anon pages imported.
                 */
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
-                       if (anon_vma_clone(importer, exporter))
-                               return -ENOMEM;
+                       int error;
+
+                       error = anon_vma_clone(importer, exporter);
+                       if (error)
+                               return error;
                        importer->anon_vma = exporter->anon_vma;
                }
        }
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (err)
                goto out_free_vma;
 
-       if (anon_vma_clone(new, vma))
+       err = anon_vma_clone(new, vma);
+       if (err)
                goto out_free_mpol;
 
        if (new->vm_file)
index 2c8da9825fe3482740ba4f4f2a0a003fabafd519..3b9b3d0741b2a1546837761d90f7eec2c0b3b18b 100644 (file)
@@ -193,6 +193,16 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+               /*
+                * Call invalidate_range() here too, so that a subsystem does
+                * not have to register an invalidate_range_end() callback
+                * when it already registers invalidate_range(). Usually a
+                * subsystem registers either invalidate_range_start()/end()
+                * or invalidate_range(), so this adds no overhead beyond the
+                * pointer check.
+                */
+               if (mn->ops->invalidate_range)
+                       mn->ops->invalidate_range(mn, mm, start, end);
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, mm, start, end);
        }
@@ -200,6 +210,21 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
 
+void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       struct mmu_notifier *mn;
+       int id;
+
+       id = srcu_read_lock(&srcu);
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+               if (mn->ops->invalidate_range)
+                       mn->ops->invalidate_range(mn, mm, start, end);
+       }
+       srcu_read_unlock(&srcu, id);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+
 static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    int take_mmap_sem)
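
With the new hook, a secondary-MMU user that can flush while the caller's page-table synchronization still holds no longer needs invalidate_range_end(). A hedged sketch of such a registration; the demo_* names are illustrative:

    #include <linux/mmu_notifier.h>

    static void demo_invalidate_range(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
    {
            /* Flush the device TLB for [start, end); called while the kernel
             * still holds the synchronization protecting the page tables. */
    }

    static const struct mmu_notifier_ops demo_ops = {
            .invalidate_range = demo_invalidate_range,
    };

    static struct mmu_notifier demo_mn = { .ops = &demo_ops };

    /* Registered from process context against the target mm:
     *
     *      mmu_notifier_register(&demo_mn, current->mm);
     */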
index 7c7ab32ee5032dad07354f438b5832649aaa044b..90b50468333e38563d4388096e584b6c23fa9132 100644 (file)
@@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       if (reset_managed_pages_done)
-               return;
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
 }
@@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void)
 {
        struct pglist_data *pgdat;
 
+       if (reset_managed_pages_done)
+               return;
+
        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
+
        reset_managed_pages_done = 1;
 }
 
index 9cd36b822444433539fbe0cc3acf8f312172345d..616a2c956b4b2a6aee5cc1f7d0098cf4a4cd5912 100644 (file)
@@ -466,29 +466,6 @@ static inline void rmv_page_order(struct page *page)
        set_page_private(page, 0);
 }
 
-/*
- * Locate the struct page for both the matching buddy in our
- * pair (buddy1) and the combined O(n+1) page they form (page).
- *
- * 1) Any buddy B1 will have an order O twin B2 which satisfies
- * the following equation:
- *     B2 = B1 ^ (1 << O)
- * For example, if the starting buddy (buddy2) is #8 its order
- * 1 buddy is #10:
- *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
- *
- * 2) Any buddy B will have an order O+1 parent P which
- * satisfies the following equation:
- *     P = B & ~(1 << O)
- *
- * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
- */
-static inline unsigned long
-__find_buddy_index(unsigned long page_idx, unsigned int order)
-{
-       return page_idx ^ (1 << order);
-}
-
 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
@@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page,
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;
+       int max_order = MAX_ORDER;
 
        VM_BUG_ON(!zone_is_initialized(zone));
 
@@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page,
                        return;
 
        VM_BUG_ON(migratetype == -1);
+       if (is_migrate_isolate(migratetype)) {
+               /*
+                * We restrict max order of merging to prevent merge
+                * between freepages on isolate pageblock and normal
+                * pageblock. Without this, pageblock isolation
+                * could cause incorrect freepage accounting.
+                */
+               max_order = min(MAX_ORDER, pageblock_order + 1);
+       } else {
+               __mod_zone_freepage_state(zone, 1 << order, migratetype);
+       }
 
-       page_idx = pfn & ((1 << MAX_ORDER) - 1);
+       page_idx = pfn & ((1 << max_order) - 1);
 
        VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
-       while (order < MAX_ORDER-1) {
+       while (order < max_order - 1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
@@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page,
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard_flag(buddy);
-                       set_page_private(page, 0);
-                       __mod_zone_freepage_state(zone, 1 << order,
-                                                 migratetype);
+                       set_page_private(buddy, 0);
+                       if (!is_migrate_isolate(migratetype)) {
+                               __mod_zone_freepage_state(zone, 1 << order,
+                                                         migratetype);
+                       }
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
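To see why capping at pageblock_order + 1 suffices: the buddy of a fully free pageblock is, by construction, the adjacent pageblock, so permitting one more merge step would fuse an isolated pageblock with a normal one. A standalone demo (assuming pageblock_order = 9, the typical x86 value):

    #include <stdio.h>

    #define PAGEBLOCK_ORDER 9       /* assumption: typical x86 value */

    int main(void)
    {
            unsigned long page_idx = 0;             /* start of an isolated pageblock */
            unsigned int order = PAGEBLOCK_ORDER;   /* the whole pageblock is free */
            unsigned long buddy_idx = page_idx ^ (1UL << order);

            /* buddy_idx == 512, the first page of the *next* pageblock; the
             * loop cap "order < max_order - 1" with max_order set to
             * pageblock_order + 1 stops the merge right before this step. */
            printf("order-%u buddy of page #%lu is page #%lu\n",
                   order, page_idx, buddy_idx);
            return 0;
    }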
@@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        mt = get_freepage_migratetype(page);
+                       if (unlikely(has_isolate_pageblock(zone)))
+                               mt = get_pageblock_migratetype(page);
+
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                        trace_mm_page_pcpu_drain(page, 0, mt);
-                       if (likely(!is_migrate_isolate_page(page))) {
-                               __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
-                               if (is_migrate_cma(mt))
-                                       __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
-                       }
                } while (--to_free && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
@@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone,
        if (nr_scanned)
                __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
+       if (unlikely(has_isolate_pageblock(zone) ||
+               is_migrate_isolate(migratetype))) {
+               migratetype = get_pfnblock_migratetype(page, pfn);
+       }
        __free_one_page(page, pfn, zone, order, migratetype);
-       if (unlikely(!is_migrate_isolate(migratetype)))
-               __mod_zone_freepage_state(zone, 1 << order, migratetype);
        spin_unlock(&zone->lock);
 }
 
@@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(split_page);
 
-static int __isolate_free_page(struct page *page, unsigned int order)
+int __isolate_free_page(struct page *page, unsigned int order)
 {
        unsigned long watermark;
        struct zone *zone;
@@ -6408,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
-                      outer_start, end);
+               pr_info("%s: [%lx, %lx) PFNs busy\n",
+                       __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
        }
 
-
        /* Grab isolated pages from freelists. */
        outer_end = isolate_freepages_range(&cc, outer_start, end);
        if (!outer_end) {
index d1473b2e9481731988695755a618baa0991556a7..c8778f7e208e8a4a640e2c12f091956f4aa33575 100644 (file)
@@ -60,6 +60,7 @@ out:
                int migratetype = get_pageblock_migratetype(page);
 
                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+               zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
        struct zone *zone;
        unsigned long flags, nr_pages;
+       struct page *isolated_page = NULL;
+       unsigned int order;
+       unsigned long page_idx, buddy_idx;
+       struct page *buddy;
 
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
-       nr_pages = move_freepages_block(zone, page, migratetype);
-       __mod_zone_freepage_state(zone, nr_pages, migratetype);
+
+       /*
+        * Because a free page with order >= pageblock_order on an isolated
+        * pageblock is restricted from merging due to the freepage counting
+        * problem, a free buddy page may still exist here.
+        * move_freepages_block() does not handle merging, so we need another
+        * approach: isolating and freeing the page causes these pages
+        * to be merged.
+        */
+       if (PageBuddy(page)) {
+               order = page_order(page);
+               if (order >= pageblock_order) {
+                       page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+                       buddy_idx = __find_buddy_index(page_idx, order);
+                       buddy = page + (buddy_idx - page_idx);
+
+                       if (!is_migrate_isolate_page(buddy)) {
+                               __isolate_free_page(page, order);
+                               set_page_refcounted(page);
+                               isolated_page = page;
+                       }
+               }
+       }
+
+       /*
+        * If we isolated a free page of order >= pageblock_order, there
+        * should be no other free pages in the range, so we can avoid the
+        * costly pageblock scan when moving free pages.
+        */
+       if (!isolated_page) {
+               nr_pages = move_freepages_block(zone, page, migratetype);
+               __mod_zone_freepage_state(zone, nr_pages, migratetype);
+       }
        set_pageblock_migratetype(page, migratetype);
+       zone->nr_isolate_pageblock--;
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
+       if (isolated_page)
+               __free_pages(isolated_page, order);
 }
 
 static inline struct page *
index 19886fb2f13aac6a659ba1ae8b9e4db2f8efa7a4..a2a1eab077b00968a0797087cf1ef3ac99e6a1e3 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
+       int error;
 
        /* Don't bother if the parent process has no anon_vma here. */
        if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
         */
-       if (anon_vma_clone(vma, pvma))
-               return -ENOMEM;
+       error = anon_vma_clone(vma, pvma);
+       if (error)
+               return error;
 
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
@@ -1378,7 +1380,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
-               pteval = ptep_clear_flush(vma, address, pte);
+               pteval = ptep_clear_flush_notify(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address)) {
index eb2b2ea301309887972c148bb80aedf8a6731b9b..f34e053ec46e24bb364a847ac498ef61b5011b27 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
        void *obj;
        int x;
 
-       VM_BUG_ON(nodeid > num_online_nodes());
+       VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
        n = get_node(cachep, nodeid);
        BUG_ON(!n);
 
index 406944207b61dbd607bc7f2d2b244b6998f47254..dcdab81bd240bafe3bec02cb32bf3390763a90e9 100644 (file)
@@ -259,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
                if (s->size - size >= sizeof(void *))
                        continue;
 
+               if (IS_ENABLED(CONFIG_SLAB) && align &&
+                       (align > s->align || s->align % align))
+                       continue;
+
                return s;
        }
        return NULL;
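The new check refuses to merge caches when the caller requested an alignment the existing cache cannot honour. The predicate, rewritten as a standalone helper to make the two conditions explicit (a sketch, not the kernel function):

    #include <stdbool.h>
    #include <stddef.h>

    /* A cache aligned to cache_align can serve a request for `align` iff
     * align <= cache_align and cache_align is a multiple of align. */
    static bool align_compatible(size_t align, size_t cache_align)
    {
            if (align == 0)
                    return true;        /* no alignment requested */
            return align <= cache_align && cache_align % align == 0;
    }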
index 261eaf6e5a198d47d662d4a7da0b950f01660c6a..f1e4d60523694eb758b93fcd6487866c203956dd 100644 (file)
@@ -715,8 +715,9 @@ EXPORT_SYMBOL(truncate_pagecache);
  * necessary) to @newsize. It will be typically be called from the filesystem's
  * setattr function when ATTR_SIZE is passed in.
  *
- * Must be called with inode_mutex held and before all filesystem specific
- * block truncation has been performed.
+ * Must be called with a lock serializing truncates and writes (generally
+ * i_mutex but e.g. xfs uses a different lock) and before all filesystem
+ * specific block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
@@ -755,7 +756,6 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
        struct page *page;
        pgoff_t index;
 
-       WARN_ON(!mutex_is_locked(&inode->i_mutex));
        WARN_ON(to > inode->i_size);
 
        if (from >= to || bsize == PAGE_CACHE_SIZE)
index d4042e75f7c7e7c7d498c4fcc33c90f1d1de2bff..c5afd573d7da79afc814225043319cc66120addf 100644 (file)
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
        unsigned long scanned;
        unsigned long reclaimed;
 
+       spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
-       if (!vmpr->scanned)
+       scanned = vmpr->scanned;
+       if (!scanned) {
+               spin_unlock(&vmpr->sr_lock);
                return;
+       }
 
-       spin_lock(&vmpr->sr_lock);
-       scanned = vmpr->scanned;
        reclaimed = vmpr->reclaimed;
        vmpr->scanned = 0;
        vmpr->reclaimed = 0;
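The fix moves the emptiness check under sr_lock so that reading vmpr->scanned and resetting the counters form a single critical section. The general shape, as a hedged userspace sketch with a pthread mutex standing in for the spinlock:

    #include <pthread.h>

    static pthread_mutex_t sr_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long scanned_ctr;   /* updated concurrently elsewhere */

    static void work_fn(void)
    {
            unsigned long scanned;

            pthread_mutex_lock(&sr_lock);
            scanned = scanned_ctr;      /* read and test under the lock... */
            if (!scanned) {
                    pthread_mutex_unlock(&sr_lock);
                    return;
            }
            scanned_ctr = 0;            /* ...so the reset cannot race the check */
            pthread_mutex_unlock(&sr_lock);
            /* act on the snapshot of `scanned` outside the lock */
    }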
index 648d79ccf46244a5769c1703361cf35db4cfeb09..c465876c7861814ba545cf83783c7ba11bbd91eb 100644 (file)
@@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br,
                return;
 
        if (port) {
-               __skb_push(skb, sizeof(struct ethhdr));
                skb->dev = port->dev;
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-                       dev_queue_xmit);
+                       br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
                netif_rx(skb);
index 2ff9706647f2cb9f7930d22d0a3092fb3fe0d225..e5ec470b851f1f55fe37ec4ecaf557d75483af26 100644 (file)
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+       [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
index 654c9018e3e75a5f0bd01ba2197a7777e8626bf5..48da2c54a69e36aa1ae2f1d9e84cdb82e49a84f3 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
 #include "../br_private.h"
 
index de6662b14e1f5d7110ac1316c1362e4db6bffc5f..7e38b729696a97567e3c0d284631a3229ef413dd 100644 (file)
@@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        struct ceph_crypto_key old_key;
        void *ticket_buf = NULL;
        void *tp, *tpend;
+       void **ptp;
        struct ceph_timespec new_validity;
        struct ceph_crypto_key new_session_key;
        struct ceph_buffer *new_ticket_blob;
@@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac,
                        goto out;
                }
                tp = ticket_buf;
-               dlen = ceph_decode_32(&tp);
+               ptp = &tp;
+               tpend = *ptp + dlen;
        } else {
                /* unencrypted */
-               ceph_decode_32_safe(p, end, dlen, bad);
-               ticket_buf = kmalloc(dlen, GFP_NOFS);
-               if (!ticket_buf) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               tp = ticket_buf;
-               ceph_decode_need(p, end, dlen, bad);
-               ceph_decode_copy(p, ticket_buf, dlen);
+               ptp = p;
+               tpend = end;
        }
-       tpend = tp + dlen;
+       ceph_decode_32_safe(ptp, tpend, dlen, bad);
        dout(" ticket blob is %d bytes\n", dlen);
-       ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-       blob_struct_v = ceph_decode_8(&tp);
-       new_secret_id = ceph_decode_64(&tp);
-       ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+       ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
+       blob_struct_v = ceph_decode_8(ptp);
+       new_secret_id = ceph_decode_64(ptp);
+       ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
        if (ret)
                goto out;
 
index 62fc5e7a9acf7506eba2de7ae314ba6067870ceb..790fe89d90c0ac49301bfcc81ba1b6633b9559cd 100644 (file)
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg avoids a memory allocation inside sg_alloc_table() in
+ * cases where a single sg is sufficient.  For simplicity, no attempt
+ * is made to reduce the number of sgs by coalescing physically
+ * contiguous pages.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+                        const void *buf, unsigned int buf_len)
+{
+       struct scatterlist *sg;
+       const bool is_vmalloc = is_vmalloc_addr(buf);
+       unsigned int off = offset_in_page(buf);
+       unsigned int chunk_cnt = 1;
+       unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+       int i;
+       int ret;
+
+       if (buf_len == 0) {
+               memset(sgt, 0, sizeof(*sgt));
+               return -EINVAL;
+       }
+
+       if (is_vmalloc) {
+               chunk_cnt = chunk_len >> PAGE_SHIFT;
+               chunk_len = PAGE_SIZE;
+       }
+
+       if (chunk_cnt > 1) {
+               ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+               if (ret)
+                       return ret;
+       } else {
+               WARN_ON(chunk_cnt != 1);
+               sg_init_table(prealloc_sg, 1);
+               sgt->sgl = prealloc_sg;
+               sgt->nents = sgt->orig_nents = 1;
+       }
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               struct page *page;
+               unsigned int len = min(chunk_len - off, buf_len);
+
+               if (is_vmalloc)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = virt_to_page(buf);
+
+               sg_set_page(sg, page, len, off);
+
+               off = 0;
+               buf += len;
+               buf_len -= len;
+       }
+       WARN_ON(buf_len != 0);
+
+       return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+       if (sgt->orig_nents > 1)
+               sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[2], sg_out[1];
+       struct scatterlist sg_in[2], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
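Every ceph_aes_* function below adopts the same pairing: build the sg_table, run the cipher, then tear the table down on all exit paths. A condensed fragment of that shape (not compilable on its own; tfm, buf and buf_len stand in for the real locals):

            struct sg_table sgt;
            struct scatterlist prealloc_sg;
            int ret;

            ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
            if (ret)
                    goto out_tfm;               /* no table to tear down yet */

            /* ... crypto_blkcipher_encrypt()/decrypt() using sgt.sgl ... */

    out_sg:
            teardown_sgtable(&sgt);             /* frees only if sg_alloc_table() ran */
    out_tfm:
            crypto_free_blkcipher(tfm);
            return ret;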
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
        *dst_len = src_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 2);
        sg_set_buf(&sg_in[0], src, src_len);
        sg_set_buf(&sg_in[1], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
                             const void *src1, size_t src1_len,
                             const void *src2, size_t src2_len)
 {
-       struct scatterlist sg_in[3], sg_out[1];
+       struct scatterlist sg_in[3], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 
        *dst_len = src1_len + src2_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 3);
        sg_set_buf(&sg_in[0], src1, src1_len);
        sg_set_buf(&sg_in[1], src2, src2_len);
        sg_set_buf(&sg_in[2], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src1_len + src2_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[2];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[2], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       sg_init_table(sg_in, 1);
        sg_init_table(sg_out, 2);
-       sg_set_buf(sg_in, src, src_len);
        sg_set_buf(&sg_out[0], dst, *dst_len);
        sg_set_buf(&sg_out[1], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst_len)
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                             void *dst2, size_t *dst2_len,
                             const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[3];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[3], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       sg_init_table(sg_in, 1);
-       sg_set_buf(sg_in, src, src_len);
        sg_init_table(sg_out, 3);
        sg_set_buf(&sg_out[0], dst1, *dst1_len);
        sg_set_buf(&sg_out[1], dst2, *dst2_len);
        sg_set_buf(&sg_out[2], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
        crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                       dst2, *dst2_len, 1);
        */
 
-       return 0;
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 
index 559c9f619c2027e33f733469b2a8a59eda582f89..8d1653caffdb4104d893c2bcbac939ed465a8bc9 100644 (file)
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
                               IPPROTO_TCP, &sock);
        if (ret)
                return ret;
-       sock->sk->sk_allocation = GFP_NOFS;
+       sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
 
 #ifdef CONFIG_LOCKDEP
        lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 
                return ret;
        }
+
+       sk_set_memalloc(sock->sk);
+
        con->sock = sock;
        return 0;
 }
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
 {
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);
+       unsigned long pflags = current->flags;
        bool fault;
 
+       current->flags |= PF_MEMALLOC;
+
        mutex_lock(&con->mutex);
        while (true) {
                int ret;
@@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work)
                con_fault_finish(con);
 
        con->ops->put(con);
+
+       tsk_restore_flags(current, pflags, PF_MEMALLOC);
 }
 
 /*
index f3fc54eac09d32e691a64b5d65ba90793cba96b8..6f164289bde8860e333c747934da5459e807d7eb 100644 (file)
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
        dout("__remove_osd %p\n", osd);
-       BUG_ON(!list_empty(&osd->o_requests));
-       BUG_ON(!list_empty(&osd->o_linger_requests));
+       WARN_ON(!list_empty(&osd->o_requests));
+       WARN_ON(!list_empty(&osd->o_linger_requests));
 
        rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
+
+       list_del_init(&req->r_req_lru_item); /* can be on notarget */
        ceph_osdc_put_request(req);
 }
 
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
+               list_del_init(&req->r_linger_osd_item);
                req->r_osd = NULL;
        }
 
index a6882686ca3a10fc3be7ced6299dc7385ffd239d..76321ea442c3e06c289e86184c5c4e513cbad7ff 100644 (file)
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
                }
                if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+                       put_net(net);
                        err = -EPERM;
                        goto errout;
                }
@@ -2685,13 +2686,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        int idx = 0;
        u32 portid = NETLINK_CB(cb->skb).portid;
        u32 seq = cb->nlh->nlmsg_seq;
-       struct nlattr *extfilt;
        u32 filter_mask = 0;
 
-       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
-                                 IFLA_EXT_MASK);
-       if (extfilt)
-               filter_mask = nla_get_u32(extfilt);
+       if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
+               struct nlattr *extfilt;
+
+               extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
+                                         IFLA_EXT_MASK);
+               if (extfilt) {
+                       if (nla_len(extfilt) < sizeof(filter_mask))
+                               return -EINVAL;
+
+                       filter_mask = nla_get_u32(extfilt);
+               }
+       }
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2806,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
@@ -2868,6 +2879,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
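All three hunks in this file apply the same rule: validate nla_len() against the size about to be read before calling nla_get_u16()/nla_get_u32(), since a malformed netlink message may carry a shorter payload than the attribute type implies. The pattern in isolation, sketched with the u32 case from rtnl_bridge_getlink():

            /* attr comes from an untrusted netlink message: its payload may
             * be shorter than the type implies, so validate before reading. */
            if (nla_len(attr) < sizeof(u32))
                    return -EINVAL;             /* reject truncated payloads */
            filter_mask = nla_get_u32(attr);    /* now safe to read 4 bytes */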
index c16615bfb61edd2a1dae9ef7935a3153d78dc4df..32e31c2996315770149b71b1e17837311d672a1e 100644 (file)
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb)
        case SKB_FCLONE_CLONE:
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-               /* Warning : We must perform the atomic_dec_and_test() before
-                * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
-                * skb_clone() could set clone_ref to 2 before our decrement.
-                * Anyway, if we are going to free the structure, no need to
-                * rewrite skb->fclone.
+               /* The clone portion is available for
+                * fast-cloning again.
                 */
-               if (atomic_dec_and_test(&fclones->fclone_ref)) {
+               skb->fclone = SKB_FCLONE_FREE;
+
+               if (atomic_dec_and_test(&fclones->fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, fclones);
-               } else {
-                       /* The clone portion is available for
-                        * fast-cloning again.
-                        */
-                       skb->fclone = SKB_FCLONE_FREE;
-               }
                break;
        }
 }
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_FREE) {
                n->fclone = SKB_FCLONE_CLONE;
-               /* As our fastclone was free, clone_ref must be 1 at this point.
-                * We could use atomic_inc() here, but it is faster
-                * to set the final value.
-                */
-               atomic_set(&fclones->fclone_ref, 2);
+               atomic_inc(&fclones->fclone_ref);
        } else {
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;
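With the atomic_set(..., 2) shortcut reverted, fclone_ref behaves as a plain reference count again. The lifecycle, as a hedged outline (the initial count of 1 at allocation is an assumption about the allocator, not shown in these hunks):

            /* __alloc_skb(): the fclone pair starts with one reference
             * (assumption) */
            atomic_set(&fclones->fclone_ref, 1);

            /* skb_clone(): handing out the clone takes a second reference */
            atomic_inc(&fclones->fclone_ref);

            /* kfree_skbmem(), once per skb: mark the clone slot reusable
             * first, then drop a reference; the last put frees the pair */
            skb->fclone = SKB_FCLONE_FREE;
            if (atomic_dec_and_test(&fclones->fclone_ref))
                    kmem_cache_free(skbuff_fclone_cache, fclones);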
index ca11d283bbebe3ae02345ecd72cb022ae7c43667..93ea80196f0ec383cca46d28bf8c4c96d0310b25 100644 (file)
@@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
        if (!app)
                return -EMSGSIZE;
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        list_for_each_entry(itr, &dcb_app_list, list) {
                if (itr->ifindex == netdev->ifindex) {
                        err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
                                         &itr->app);
                        if (err) {
-                               spin_unlock(&dcb_lock);
+                               spin_unlock_bh(&dcb_lock);
                                return -EMSGSIZE;
                        }
                }
@@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
        else
                dcbx = -EOPNOTSUPP;
 
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
        nla_nest_end(skb, app);
 
        /* get peer info if available */
@@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
        }
 
        /* local app */
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
        if (!app)
                goto dcb_unlock;
@@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
        else
                dcbx = -EOPNOTSUPP;
 
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
 
        /* features flags */
        if (ops->getfeatcfg) {
@@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
        return 0;
 
 dcb_unlock:
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
 nla_put_failure:
        return err;
 }
@@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
        struct dcb_app_type *itr;
        u8 prio = 0;
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
                prio = itr->app.priority;
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
 
        return prio;
 }
@@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
        if (dev->dcbnl_ops->getdcbx)
                event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        /* Search for existing match and replace */
        if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
                if (new->priority)
@@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
        if (new->priority)
                err = dcb_app_add(new, dev->ifindex);
 out:
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
        if (!err)
                call_dcbevent_notifiers(DCB_APP_EVENT, &event);
        return err;
@@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
        struct dcb_app_type *itr;
        u8 prio = 0;
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
                prio |= 1 << itr->app.priority;
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
 
        return prio;
 }
@@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
        if (dev->dcbnl_ops->getdcbx)
                event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        /* Search for existing match and abort if found */
        if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
                err = -EEXIST;
@@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 
        err = dcb_app_add(new, dev->ifindex);
 out:
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
        if (!err)
                call_dcbevent_notifiers(DCB_APP_EVENT, &event);
        return err;
@@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
        if (dev->dcbnl_ops->getdcbx)
                event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        /* Search for existing match and remove it. */
        if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
                list_del(&itr->list);
@@ -1890,7 +1890,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
                err = 0;
        }
 
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
        if (!err)
                call_dcbevent_notifiers(DCB_APP_EVENT, &event);
        return err;
@@ -1902,12 +1902,12 @@ static void dcb_flushapp(void)
        struct dcb_app_type *app;
        struct dcb_app_type *tmp;
 
-       spin_lock(&dcb_lock);
+       spin_lock_bh(&dcb_lock);
        list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
                list_del(&app->list);
                kfree(app);
        }
-       spin_unlock(&dcb_lock);
+       spin_unlock_bh(&dcb_lock);
 }
 
 static int __init dcbnl_init(void)
index 6d1817449c3675bb4ddd6d47a16ea017a5213e0d..ab03e00ffe8f0e2c4095e4f8c33d79c3e6d069da 100644 (file)
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
        /* We could not connect to a designated PHY, so use the switch internal
         * MDIO bus instead
         */
-       if (!p->phy)
+       if (!p->phy) {
                p->phy = ds->slave_mii_bus->phy_map[p->port];
-       else
+               phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                                  p->phy_interface);
+       } else {
                pr_info("attached PHY at address %d [%s]\n",
                        p->phy->addr, p->phy->drv->name);
+       }
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
index 8b7fe5b039068559a931b931529f02841cc13fe2..e67da4e6c3240bb20a7d8a03a3b579f7484f629e 100644 (file)
@@ -1386,6 +1386,17 @@ out:
        return pp;
 }
 
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+{
+       if (sk->sk_family == AF_INET)
+               return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+#endif
+       return -EINVAL;
+}
+
 static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
        __be16 newlen = htons(skb->len - nhoff);
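inet_recv_error() gives the ping and TCP receive paths (see the hunks further down) one family-agnostic entry point for draining the socket error queue. From userspace that queue is reached with the standard MSG_ERRQUEUE flag; a minimal, hedged example of the calling convention:

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Drain one message from a socket's error queue; returns recvmsg()'s
     * result (-1 with errno == EAGAIN when the queue is empty). */
    static ssize_t drain_errqueue(int fd)
    {
            char data[512], control[512];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg;

            memset(&msg, 0, sizeof(msg));
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;
            msg.msg_control = control;
            msg.msg_controllen = sizeof(control);

            return recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
    }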
index f2e15738534d316ed0c50b8be2e1976fa03052c5..8f7bd56955b0dc35bb89e4fd0d3fe19fe9b53a47 100644 (file)
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
        else
                res->tclassid = 0;
 #endif
+
+       if (err == -ESRCH)
+               err = -ENETUNREACH;
+
        return err;
 }
 EXPORT_SYMBOL_GPL(__fib_lookup);
index 32e78924e246bb7f89ad8a7b7a722e04fd879d0e..606c520ffd5af44dd79126ce119945e1f8a070ce 100644 (file)
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOSYS;
        const struct net_offload **offloads;
 
+       udp_tunnel_gro_complete(skb, nhoff);
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
index 065cd94c640c0c2a20cbf99c31cba588db54897e..dedb21e9991438259ff66a4e4817349dfd9d3224 100644 (file)
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
        return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
                                   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
        destroy_workqueue(geneve_wq);
+       unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
index fb70e3ecc3e4d58da5db9756db9b6da027369288..bb15d0e03d4f851d789734213814ed5359b59be1 100644 (file)
@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
        return scount;
 }
 
-#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
-
-static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
        struct sk_buff *skb;
        struct rtable *rt;
@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        struct flowi4 fl4;
        int hlen = LL_RESERVED_SPACE(dev);
        int tlen = dev->needed_tailroom;
+       unsigned int size = mtu;
 
        while (1) {
                skb = alloc_skb(size + hlen + tlen,
@@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
                        return NULL;
        }
        skb->priority = TC_PRIO_CONTROL;
-       igmp_skb_size(skb) = size;
 
        rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
                                   0, 0,
@@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        skb_dst_set(skb, &rt->dst);
        skb->dev = dev;
 
+       skb->reserved_tailroom = skb_end_offset(skb) -
+                                min(mtu, skb_end_offset(skb));
        skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
@@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
        return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
-       skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        int type, int gdeleted, int sdeleted)
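The reserved_tailroom assignment above encodes "never fill the skb past the MTU" as plain arithmetic: available room = tailroom - reserved = min(mtu, end_offset) - current length. A standalone check of that identity (assuming skb_availroom() computes end - tail - reserved_tailroom, which matches the definition this hunk relies on):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int end_offset = 2048;  /* allocated skb data area */
            unsigned int mtu = 1500;
            unsigned int len = 900;          /* bytes already queued */

            unsigned int reserved = end_offset - min_u(mtu, end_offset);
            unsigned int availroom = (end_offset - len) - reserved;

            printf("availroom = %u\n", availroom);   /* 600 == mtu - len */
            return 0;
    }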
index c373a9ad45556815d1d95f4faf71b8f8a3f61996..9daf2177dc005c339c536c618fedc11259c178bc 100644 (file)
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
                if (allow_ipv6 &&
                    cmsg->cmsg_level == SOL_IPV6 &&
                    cmsg->cmsg_type == IPV6_PKTINFO) {
index 3e861011e4a31e57b21c3fa507c178d3693a7970..1a7e979e80ba356f685ecfe020b98855f19db0a3 100644 (file)
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
+       .dellink        = ip_tunnel_dellink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
 };
index c1023c4459201ad63394606cab4b42fae61257aa..665de06561cd51e0933aa8436438b499d3c5066c 100644 (file)
@@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
        struct nf_nat_range range;
        unsigned int verdict;
 
+       memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
 
        verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
index 57f7c98041394998fe390735aa5b8cd2602b3f90..5d740cccf69ec29d9778ddb0568c726266904ce9 100644 (file)
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                                             &ipv6_hdr(skb)->daddr))
                                continue;
 #endif
+               } else {
+                       continue;
                }
 
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (flags & MSG_OOB)
                goto out;
 
-       if (flags & MSG_ERRQUEUE) {
-               if (family == AF_INET) {
-                       return ip_recv_error(sk, msg, len, addr_len);
-#if IS_ENABLED(CONFIG_IPV6)
-               } else if (family == AF_INET6) {
-                       return pingv6_ops.ipv6_recv_error(sk, msg, len,
-                                                         addr_len);
-#endif
-               }
-       }
+       if (flags & MSG_ERRQUEUE)
+               return inet_recv_error(sk, msg, len, addr_len);
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
index 39ec0c379545afe07d6016c9180e9c408d22d4df..38c2bcb8dd5da4f2c8dede10942a1388fd3d9954 100644 (file)
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        u32 urg_hole = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
-               return ip_recv_error(sk, msg, len, addr_len);
+               return inet_recv_error(sk, msg, len, addr_len);
 
        if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
index a12b455928e52211efdc6b471ef54de6218f5df0..d107ee246a1d32703ccc260309d52c3493927862 100644 (file)
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that this is trivially available in
+ * tp->retrans_out; however, that kind of assumption does not consider
+ * what happens if errors occur when sending a retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * The main reason for all this complexity is that the connection dying
+ * time now depends on the validity of retrans_stamp; in particular,
+ * successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (tp->retrans_out)
+               return true;
+
+       skb = tcp_write_queue_head(sk);
+       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+               return true;
+
+       return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                 * is ACKed. For Reno it is MUST to prevent false
                 * fast retransmits (RFC2582). SACK TCP is safe. */
                tcp_moderate_cwnd(tp);
+               if (!tcp_any_retrans_done(sk))
+                       tp->retrans_stamp = 0;
                return true;
        }
        tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
        return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
-
-       if (tp->retrans_out)
-               return true;
-
-       skb = tcp_write_queue_head(sk);
-       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-               return true;
-
-       return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
@@ -5229,7 +5231,7 @@ slow_path:
        if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
 
-       if (!th->ack && !th->rst)
+       if (!th->ack && !th->rst && !th->syn)
                goto discard;
 
        /*
@@ -5648,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
        }
 
-       if (!th->ack && !th->rst)
+       if (!th->ack && !th->rst && !th->syn)
                goto discard;
 
        if (!tcp_validate_incoming(sk, skb, th, 0))
index 9c7d7621466b1241f404a5ca11de809dcff2d02a..147be202429064d03b34405dba575c8d002b267c 100644 (file)
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (skb_rtable(skb)->rt_type != RTN_LOCAL)
+       /* If sk is not NULL, we did a successful lookup, so the incoming
+        * route must have been correct. prequeue might have dropped our dst.
+        */
+       if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;
 
        /* Swap the send and the receive. */
index 12c3c8ef3849467296ced452089008872e64083f..0e32d2e1bdbfecabd3ae7e8ddb33a99f51cd7b51 100644 (file)
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
 
                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IP
+                * - Change protocol to IPv6
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IP);
+                       skb->protocol = htons(ETH_P_IPV6);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        /* Precalculate GRE options length */
        if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
                u64_stats_init(&ip6gre_tunnel_stats->syncp);
        }
 
+       dev->iflink = tunnel->parms.link;
 
        return 0;
 }
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
index a071563a7e6e9c1f6d0fa9d8490c1d35ce89b3f7..01e12d0d8fcc9284403629fd17d7de010463d480 100644 (file)
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
-                    ~(SKB_GSO_UDP |
+                    ~(SKB_GSO_TCPV4 |
+                      SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
index 9409887fb664dd78d13937b89ea1b5044afdbc94..9cb94cfa0ae71d05a5bd924e58eb225e21dbb62d 100644 (file)
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
        int err;
 
        t = netdev_priv(dev);
-       err = ip6_tnl_dev_init(dev);
-       if (err < 0)
-               goto out;
 
        err = register_netdevice(dev);
        if (err < 0)
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+       .ndo_init       = ip6_tnl_dev_init,
        .ndo_uninit     = ip6_tnl_dev_uninit,
        .ndo_start_xmit = ip6_tnl_xmit,
        .ndo_do_ioctl   = ip6_tnl_ioctl,
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err = ip6_tnl_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       ip6_tnl_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
index b04ed72c454247886d7d99ae5c9e36e949b48cd1..8db6c98fe21858f4b3f630af277a0137e438aa8d 100644 (file)
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
        uh->source = src_port;
 
        uh->len = htons(skb->len);
-       uh->check = 0;
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                            | IPSKB_REROUTED);
        skb_dst_set(skb, dst);
 
-       udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
-                     &sk->sk_v6_daddr, skb->len);
+       udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
 
        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
index d440bb585524d72202f809ebd1f0cb4ee8b7cc79..bcda14de7f84822a17b382a6256e49b665ed7a83 100644 (file)
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
        int err;
 
-       err = vti6_dev_init(dev);
-       if (err < 0)
-               goto out;
-
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops vti6_netdev_ops = {
+       .ndo_init       = vti6_dev_init,
        .ndo_uninit     = vti6_dev_uninit,
        .ndo_start_xmit = vti6_tnl_xmit,
        .ndo_do_ioctl   = vti6_ioctl,
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
-       int err = vti6_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       vti6_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
@@ -914,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
        return vti6_tnl_create2(dev);
 }
 
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev != ip6n->fb_tnl_dev)
+               unregister_netdevice_queue(dev, head);
+}
+
 static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
                           struct nlattr *data[])
 {
@@ -989,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .setup          = vti6_dev_setup,
        .validate       = vti6_validate,
        .newlink        = vti6_newlink,
+       .dellink        = vti6_dellink,
        .changelink     = vti6_changelink,
        .get_size       = vti6_get_size,
        .fill_info      = vti6_fill_info,
@@ -1029,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
 
        err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
        if (err < 0)
index 0171f08325c3ff3991485297de4c532fe6992bf8..1a01d79b86980b1bc00d3692a061f614e7107773 100644 (file)
@@ -1439,6 +1439,10 @@ reg_pernet_fail:
 
 void ip6_mr_cleanup(void)
 {
+       rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
+#ifdef CONFIG_IPV6_PIMSM_V2
+       inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
+#endif
        unregister_netdevice_notifier(&ip6_mr_notifier);
        unregister_pernet_subsys(&ip6mr_net_ops);
        kmem_cache_destroy(mrt_cachep);
index 9648de2b67458201ad9435b6972b05191c88c075..ed2c4e400b46bf762ff5379c3d62d69d940e5dd4 100644 (file)
@@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
        hdr->daddr = *daddr;
 }
 
-static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
 {
        struct net_device *dev = idev->dev;
        struct net *net = dev_net(dev);
@@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
        const struct in6_addr *saddr;
        int hlen = LL_RESERVED_SPACE(dev);
        int tlen = dev->needed_tailroom;
+       unsigned int size = mtu + hlen + tlen;
        int err;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
                     IPV6_TLV_PADN, 0 };
 
        /* we assume size > sizeof(ra) here */
-       size += hlen + tlen;
        /* limit our allocations to order-0 page */
        size = min_t(int, size, SKB_MAX_ORDER(0, 0));
        skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
                return NULL;
 
        skb->priority = TC_PRIO_CONTROL;
+       skb->reserved_tailroom = skb_end_offset(skb) -
+                                min(mtu, skb_end_offset(skb));
        skb_reserve(skb, hlen);
 
        if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
-       skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        int type, int gdeleted, int sdeleted, int crsend)
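
The mld_newpack()/AVAILABLE() hunks above cap how much payload later code may append by reserving any tailroom beyond the MTU. A worked example of the arithmetic, ignoring headroom for simplicity and assuming availroom behaves as tailroom minus reserved_tailroom:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int end_offset = 2048;  /* allocator rounded the skb up */
        unsigned int queued = 1200;      /* payload already in the skb */

        /* reserved_tailroom = end - min(mtu, end), as in mld_newpack() */
        unsigned int reserved = end_offset - min_u(mtu, end_offset);

        /* modelled skb_availroom(): room left before exceeding the MTU */
        unsigned int avail = end_offset - queued - reserved;

        printf("reserved = %u, available = %u\n", reserved, avail);
        /* prints: reserved = 548, available = 300 (== mtu - queued) */
        return 0;
}

The old AVAILABLE() macro computed dev->mtu - skb->len directly; reserving the excess once at allocation lets the generic skb_availroom() helper give the same bound everywhere.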
index 8a7ac685076d91b18baf91e9ab386a98415ee009..529c119cbb14c9eff00c7a5fb63148ef0bc9f740 100644 (file)
@@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
        struct nf_nat_range range;
        unsigned int verdict;
 
+       memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
 
        verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
index 58e5b4710127a996e31b9d2faa97feafc611e39f..a24557a1c1d8b6da7ee51e862ae4b847d9e744d6 100644 (file)
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
        struct sit_net *sitn = net_generic(net, sit_net_id);
        int err;
 
-       err = ipip6_tunnel_init(dev);
-       if (err < 0)
-               goto out;
-       ipip6_tunnel_clone_6rd(dev, sitn);
+       memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
+       memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
 
        if ((__force u16)t->parms.i_flags & SIT_ISATAP)
                dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
        if (err < 0)
                goto out;
 
-       strcpy(t->parms.name, dev->name);
+       ipip6_tunnel_clone_6rd(dev, sitn);
+
        dev->rtnl_link_ops = &sit_link_ops;
 
        dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops ipip6_netdev_ops = {
+       .ndo_init       = ipip6_tunnel_init,
        .ndo_uninit     = ipip6_tunnel_uninit,
        .ndo_start_xmit = sit_tunnel_xmit,
        .ndo_do_ioctl   = ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
-
-       memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
-       memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+       strcpy(tunnel->parms.name, dev->name);
 
        ipip6_tunnel_bind_dev(dev);
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
-       strcpy(tunnel->parms.name, dev->name);
 
        iph->version            = 4;
        iph->protocol           = IPPROTO_IPV6;
index ace29b60813cf8a1d7182ad2262cbcbd21810fa7..dc495ae2ead05aca0190bcab8c2d95b58beb2ac9 100644 (file)
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (!ipv6_unicast_destination(skb))
+       /* If sk not NULL, it means we did a successful lookup and incoming
+        * route had to be correct. prequeue might have dropped our dst.
+        */
+       if (!sk && !ipv6_unicast_destination(skb))
                return;
 
 #ifdef CONFIG_TCP_MD5SIG
index 91729b807c7d041ae379e89df335acefe5218635..1b095ca37aa46bd86a534a5d7eff4824d65b2ef2 100644 (file)
@@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
        struct ipxhdr *ipx = NULL;
        struct sk_buff *skb;
        int copied, rc;
+       bool locked = true;
 
        lock_sock(sk);
        /* put the autobinding in */
@@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (sock_flag(sk, SOCK_ZAPPED))
                goto out;
 
+       release_sock(sk);
+       locked = false;
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
        if (!skb) {
@@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 out_free:
        skb_free_datagram(sk, skb);
 out:
-       release_sock(sk);
+       if (locked)
+               release_sock(sk);
        return rc;
 }
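
The af_ipx change above drops the socket lock before calling skb_recv_datagram(), which can sleep, and uses a flag so the shared exit path unlocks only when the lock is still held. A sketch of that pattern with a pthread mutex standing in for lock_sock()/release_sock():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

static int blocking_receive(void)
{
        /* stands in for skb_recv_datagram(), which may sleep */
        return 0;
}

static int recvmsg_model(void)
{
        bool locked = true;
        int rc;

        pthread_mutex_lock(&sk_lock);
        /* ... checks that genuinely need the lock ... */

        pthread_mutex_unlock(&sk_lock);  /* never sleep holding it */
        locked = false;

        rc = blocking_receive();

        if (locked)                      /* single exit path, as in the fix */
                pthread_mutex_unlock(&sk_lock);
        return rc;
}

int main(void)
{
        printf("rc = %d\n", recvmsg_model());
        return 0;
}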
 
index ec24378caaafaf333152e856aa0e2e920ddbb13f..09d9caaec59112f40b060951ae16796388e2e741 100644 (file)
@@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
                __aligned(__alignof__(struct aead_request));
        struct aead_request *aead_req = (void *) aead_req_data;
 
+       if (data_len == 0)
+               return -EINVAL;
+
        memset(aead_req, 0, sizeof(aead_req_data));
 
        sg_init_one(&pt, data, data_len);
index 56b53571c8077ae5a3660449cb0afeb81375028c..509bc157ce5551700db59e380d180c486b44f097 100644 (file)
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        memset(&params, 0, sizeof(params));
        memset(&csa_ie, 0, sizeof(csa_ie));
-       err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+       err = ieee80211_parse_ch_switch_ie(sdata, elems,
                                           ifibss->chandef.chan->band,
                                           sta_flags, ifibss->bssid, &csa_ie);
        /* can't switch to destination channel, fail */
index c2aaec4dfcf0ea38da3e45cfdc7b8ad0ecef5fc8..8c68da30595df7793545200c9ce102ae2d5e612d 100644 (file)
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
  * ieee80211_parse_ch_switch_ie - parses channel switch IEs
  * @sdata: the sdata of the interface which has received the frame
  * @elems: parsed 802.11 elements received with the frame
- * @beacon: indicates if the frame was a beacon or probe response
  * @current_band: indicates the current band
  * @sta_flags: contains information about own capabilities and restrictions
  *     to decide which channel switch announcements can be accepted. Only the
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
  * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
  */
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
-                                struct ieee802_11_elems *elems, bool beacon,
+                                struct ieee802_11_elems *elems,
                                 enum ieee80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie);
index af237223a8cd9bd3aa6121576650238d42e3bbb0..653f5eb07a27f4432429a8fbbc4b32e7200cf4c3 100644 (file)
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        int i, flushed;
        struct ps_data *ps;
        struct cfg80211_chan_def chandef;
+       bool cancel_scan;
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-       if (rcu_access_pointer(local->scan_sdata) == sdata)
+       cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
+       if (cancel_scan)
                ieee80211_scan_cancel(local);
 
        /*
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                list_del(&sdata->u.vlan.list);
                mutex_unlock(&local->mtx);
                RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
+               /* see comment in the default case below */
+               ieee80211_free_keys(sdata, true);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                /*
                 * When we get here, the interface is marked down.
                 * Free the remaining keys, if there are any
-                * (shouldn't be, except maybe in WDS mode?)
+                * (which can happen in AP mode if userspace sets
+                * keys before the interface is operating, and maybe
+                * also in WDS mode)
                 *
                 * Force the key freeing to always synchronize_net()
                 * to wait for the RX path in case it is using this
-                * interface enqueuing frames at this very time on
+                * interface enqueuing frames at this very time on
                 * another CPU.
                 */
                ieee80211_free_keys(sdata, true);
-
-               /* fall through */
-       case NL80211_IFTYPE_AP:
                skb_queue_purge(&sdata->skb_queue);
        }
 
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_recalc_ps(local, -1);
 
+       if (cancel_scan)
+               flush_delayed_work(&local->scan_work);
+
        if (local->open_count == 0) {
                ieee80211_stop_device(local);
 
index e9f99c1e3fad5905682a61f978d9897833426fd0..0c8b2a77d312d5e3ad18f975ce808c44755c820b 100644 (file)
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 
        memset(&params, 0, sizeof(params));
        memset(&csa_ie, 0, sizeof(csa_ie));
-       err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
+       err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
                                           sta_flags, sdata->vif.addr,
                                           &csa_ie);
        if (err < 0)
index 2de88704278b85b8d9ce47f2ef77905108d456e7..93af0f1c9d991a52d82cbc58bcd842415b22a04a 100644 (file)
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        current_band = cbss->channel->band;
        memset(&csa_ie, 0, sizeof(csa_ie));
-       res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+       res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
                                           ifmgd->flags,
                                           ifmgd->associated->bssid, &csa_ie);
        if (res < 0)
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
        else
                mod_timer(&ifmgd->chswitch_timer,
-                         TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
+                         TU_TO_EXP_TIME((csa_ie.count - 1) *
+                                        cbss->beacon_interval));
 }
 
 static bool
index df90ce2db00c042a31bf3cd79dab3cfefcacbc35..408fd8ab4eef7eaf5f39d643644969a3943c1b3f 100644 (file)
@@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
        cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
        cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
 
-       tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
-       tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
-       tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
-       tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
-
-       while (j > 0 && (cur_thr > tmp_thr ||
-             (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
-               j--;
+       do {
                tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
                tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
                tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
                tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
-       }
+               if (cur_thr < tmp_thr ||
+                   (cur_thr == tmp_thr && cur_prob <= tmp_prob))
+                       break;
+               j--;
+       } while (j > 0);
 
        if (j < MAX_THR_RATES - 1) {
                memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
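
The minstrel_ht hunk above rewrites the best-throughput ranking as an insertion walk that stops as soon as the candidate no longer beats the entry above it. A compact model of the same idea — simplified in that ties and success probabilities are ignored, and the names are invented:

#include <stdio.h>
#include <string.h>

#define MAX_TOP 4

static void insert_sorted(int *tp_list, int n, int index, const int *thr)
{
        int j = n;

        /* walk upward while the candidate beats the entry above it */
        while (j > 0 && thr[index] > thr[tp_list[j - 1]])
                j--;

        if (j < n) {
                /* shift lower-ranked entries down, dropping the last */
                memmove(&tp_list[j + 1], &tp_list[j],
                        (n - 1 - j) * sizeof(*tp_list));
                tp_list[j] = index;
        }
}

int main(void)
{
        int thr[] = { 10, 40, 25, 30 };  /* throughput per rate index */
        int top[MAX_TOP] = { 1, 3, 0 };  /* best three, sorted descending */

        insert_sorted(top, 3, 2, thr);   /* rank rate 2 (throughput 25) */
        printf("%d %d %d\n", top[0], top[1], top[2]);  /* prints: 1 3 2 */
        return 0;
}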
index b04ca4049c95f276aa4627501d5ced01e8a3b878..a37f9af634cb6d3c7bb73ada859473e0e085402d 100644 (file)
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-                  is_multicast_ether_addr(hdr->addr1))) {
-               /* not fragmented */
+       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+               goto out;
+
+       if (is_multicast_ether_addr(hdr->addr1)) {
+               rx->local->dot11MulticastReceivedFrameCount++;
                goto out;
        }
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  out:
        if (rx->sta)
                rx->sta->rx_packets++;
-       if (is_multicast_ether_addr(hdr->addr1))
-               rx->local->dot11MulticastReceivedFrameCount++;
-       else
-               ieee80211_led_rx(rx->local);
+       ieee80211_led_rx(rx->local);
        return RX_CONTINUE;
 }
 
index 6ab00907008461fb14d551b2633a8dbcd4fef623..efeba56c913bae0d7284bfc74862599a2ad2298b 100644 (file)
@@ -22,7 +22,7 @@
 #include "wme.h"
 
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
-                                struct ieee802_11_elems *elems, bool beacon,
+                                struct ieee802_11_elems *elems,
                                 enum ieee80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie)
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                return -EINVAL;
        }
 
-       if (!beacon && sec_chan_offs) {
+       if (sec_chan_offs) {
                secondary_channel_offset = sec_chan_offs->sec_chan_offs;
-       } else if (beacon && ht_oper) {
-               secondary_channel_offset =
-                       ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
        } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
-               /* If it's not a beacon, HT is enabled and the IE not present,
-                * it's 20 MHz, 802.11-2012 8.5.2.6:
-                *      This element [the Secondary Channel Offset Element] is
-                *      present when switching to a 40 MHz channel. It may be
-                *      present when switching to a 20 MHz channel (in which
-                *      case the secondary channel offset is set to SCN).
-                */
+               /* If the secondary channel offset IE is not present,
+                * we can't know what's the post-CSA offset, so the
+                * best we can do is use 20MHz.
+                */
                secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
        }
 
index 86f9d76b1464b1b0d5e8b5fc9f9c0c129713bd57..d259da3ce67a6b18f6e4a345a5729876e373efc0 100644 (file)
@@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
        if (*op < IP_SET_OP_VERSION) {
                /* Check the version at the beginning of operations */
                struct ip_set_req_version *req_version = data;
+
+               if (*len < sizeof(struct ip_set_req_version)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
                if (req_version->version != IPSET_PROTOCOL) {
                        ret = -EPROTO;
                        goto done;
index 437a3663ad0346e13cfbc0017be20b4bad73c218..bd90bf8107dacb4e7e0683110bb2146faf168b26 100644 (file)
@@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
                new_skb = skb_realloc_headroom(skb, max_headroom);
                if (!new_skb)
                        goto error;
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
                consume_skb(skb);
                skb = new_skb;
        }
index 11ab4b078f3bb68323b1f050f3a1d5e83d271f36..66e8425dbfe77b11e41eb7c97c68f3778ee0c9e7 100644 (file)
@@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
        }
 }
 
-/* Schedule objects for release via rcu to make sure no packets are accessing
- * removed rules.
- */
-static void nf_tables_commit_release_rcu(struct rcu_head *rt)
+static void nf_tables_commit_release(struct nft_trans *trans)
 {
-       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
-
        switch (trans->msg_type) {
        case NFT_MSG_DELTABLE:
                nf_tables_table_destroy(&trans->ctx);
@@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb)
                }
        }
 
+       synchronize_rcu();
+
        list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
                list_del(&trans->list);
-               trans->ctx.nla = NULL;
-               call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
+               nf_tables_commit_release(trans);
        }
 
        nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
@@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb)
        return 0;
 }
 
-/* Schedule objects for release via rcu to make sure no packets are accessing
- * aborted rules.
- */
-static void nf_tables_abort_release_rcu(struct rcu_head *rt)
+static void nf_tables_abort_release(struct nft_trans *trans)
 {
-       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
-
        switch (trans->msg_type) {
        case NFT_MSG_NEWTABLE:
                nf_tables_table_destroy(&trans->ctx);
@@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                }
        }
 
+       synchronize_rcu();
+
        list_for_each_entry_safe_reverse(trans, next,
                                         &net->nft.commit_list, list) {
                list_del(&trans->list);
-               trans->ctx.nla = NULL;
-               call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
+               nf_tables_abort_release(trans);
        }
 
        return 0;
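
The nf_tables hunks above replace one call_rcu() per transaction with a single synchronize_rcu() followed by direct frees: one grace-period wait per batch instead of per-object callbacks. A user-space sketch of the shape of that change, with synchronize_grace_period() standing in for synchronize_rcu():

#include <stdio.h>
#include <stdlib.h>

struct trans {
        struct trans *next;
        int msg_type;
};

/* stand-in for synchronize_rcu(): once it returns, no reader can
 * still be using objects that were unpublished before the call */
static void synchronize_grace_period(void)
{
}

static void commit_release(struct trans *t)
{
        printf("releasing transaction type %d\n", t->msg_type);
        free(t);
}

static void commit(struct trans *list)
{
        struct trans *t, *next;

        synchronize_grace_period();     /* one wait for the whole batch */

        for (t = list; t; t = next) {   /* then plain, direct frees */
                next = t->next;
                commit_release(t);
        }
}

int main(void)
{
        struct trans *b = malloc(sizeof(*b));
        struct trans *a = malloc(sizeof(*a));

        if (!a || !b)
                return 1;
        b->msg_type = 2; b->next = NULL;
        a->msg_type = 1; a->next = b;
        commit(a);
        return 0;
}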
index 6c5a915cfa758bb4d8187dac9dc606201e99b458..13c2e17bbe279e6660a0a04fc804a6e1dd0a7707 100644 (file)
@@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = {
        [NFNLGRP_CONNTRACK_EXP_NEW]     = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_UPDATE]  = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
+       [NFNLGRP_NFTABLES]              = NFNL_SUBSYS_NFTABLES,
+       [NFNLGRP_ACCT_QUOTA]            = NFNL_SUBSYS_ACCT,
 };
 
 void nfnl_lock(__u8 subsys_id)
@@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 static int nfnetlink_bind(int group)
 {
        const struct nfnetlink_subsystem *ss;
-       int type = nfnl_group2type[group];
+       int type;
+
+       if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
+               return -EINVAL;
+
+       type = nfnl_group2type[group];
 
        rcu_read_lock();
        ss = nfnetlink_get_subsys(type);
@@ -514,6 +521,9 @@ static int __init nfnetlink_init(void)
 {
        int i;
 
+       for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
+               BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
+
        for (i=0; i<NFNL_SUBSYS_COUNT; i++)
                mutex_init(&table[i].mutex);
 
index 9d6d6f60a80fc6b23da9bb140c90e085a4fa675a..265e190f22187d83de1a9ed07913ef153cf1f03f 100644 (file)
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-static const struct {
-       const char      *name;
-       u8              type;
-} table_to_chaintype[] = {
-       { "filter",     NFT_CHAIN_T_DEFAULT },
-       { "raw",        NFT_CHAIN_T_DEFAULT },
-       { "security",   NFT_CHAIN_T_DEFAULT },
-       { "mangle",     NFT_CHAIN_T_ROUTE },
-       { "nat",        NFT_CHAIN_T_NAT },
-       { },
-};
-
-static int nft_compat_table_to_chaintype(const char *table)
-{
-       int i;
-
-       for (i = 0; table_to_chaintype[i].name != NULL; i++) {
-               if (strcmp(table_to_chaintype[i].name, table) == 0)
-                       return table_to_chaintype[i].type;
-       }
-
-       return -1;
-}
-
 static int nft_compat_chain_validate_dependency(const char *tablename,
                                                const struct nft_chain *chain)
 {
-       enum nft_chain_type type;
        const struct nft_base_chain *basechain;
 
        if (!tablename || !(chain->flags & NFT_BASE_CHAIN))
                return 0;
 
-       type = nft_compat_table_to_chaintype(tablename);
-       if (type < 0)
-               return -EINVAL;
-
        basechain = nft_base_chain(chain);
-       if (basechain->type->type != type)
+       if (strcmp(tablename, "nat") == 0 &&
+           basechain->type->type != NFT_CHAIN_T_NAT)
                return -EINVAL;
 
        return 0;
@@ -117,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                           struct xt_target *target, void *info,
                           union nft_entry *entry, u8 proto, bool inv)
 {
-       par->net        = &init_net;
+       par->net        = ctx->net;
        par->table      = ctx->table->name;
        switch (ctx->afi->family) {
        case AF_INET:
@@ -324,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                          struct xt_match *match, void *info,
                          union nft_entry *entry, u8 proto, bool inv)
 {
-       par->net        = &init_net;
+       par->net        = ctx->net;
        par->table      = ctx->table->name;
        switch (ctx->afi->family) {
        case AF_INET:
@@ -374,7 +346,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        union nft_entry e = {};
        int ret;
 
-       ret = nft_compat_chain_validate_dependency(match->name, ctx->chain);
+       ret = nft_compat_chain_validate_dependency(match->table, ctx->chain);
        if (ret < 0)
                goto err;
 
@@ -448,7 +420,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
                if (!(hook_mask & match->hooks))
                        return -EINVAL;
 
-               ret = nft_compat_chain_validate_dependency(match->name,
+               ret = nft_compat_chain_validate_dependency(match->table,
                                                           ctx->chain);
                if (ret < 0)
                        return ret;
index f1de72de273e20e7422bf6de42ebbca712affacf..0007b818039708bf447d74252712d93f9a8889e0 100644 (file)
@@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
                return;
 
        for (undo = 0; undo < group; undo++)
-               if (test_bit(group, &groups))
+               if (test_bit(undo, &groups))
                        nlk->netlink_unbind(undo);
 }
 
@@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err) {
-                       netlink_unbind(nlk->ngroups - 1, groups, nlk);
+                       netlink_unbind(nlk->ngroups, groups, nlk);
                        return err;
                }
        }
@@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
                nl_table[unit].module = module;
                if (cfg) {
                        nl_table[unit].bind = cfg->bind;
+                       nl_table[unit].unbind = cfg->unbind;
                        nl_table[unit].flags = cfg->flags;
                        if (cfg->compare)
                                nl_table[unit].compare = cfg->compare;
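
The af_netlink fix above corrects the undo loop: on a failed bind it must test each already-processed bit (undo), not the bit of the group that failed, and the caller now passes nlk->ngroups so every set bit is considered. A tiny self-contained demonstration with made-up values:

#include <stdio.h>

static void unbind_group(int g)
{
        printf("unbinding group %d\n", g);
}

/* undo every group below the one that failed; the old code tested
 * bit `group` on each iteration instead of bit `undo` */
static void netlink_undo_model(int group, unsigned long groups)
{
        int undo;

        for (undo = 0; undo < group; undo++)
                if (groups & (1UL << undo))
                        unbind_group(undo);
}

int main(void)
{
        /* binding group 3 failed; groups 0 and 2 had been bound */
        netlink_undo_model(3, (1UL << 0) | (1UL << 2));
        return 0;
}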
index 006886dbee36b075fc7054ec1ddc87781a75d739..8c4229b11c34c617a81e58dcdb1a4a6034fd064c 100644 (file)
@@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 {
        int transport_len = skb->len - skb_transport_offset(skb);
 
-       if (l4_proto == IPPROTO_TCP) {
+       if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
-       } else if (l4_proto == IPPROTO_UDP) {
+       } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
@@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
+       } else if (l4_proto == NEXTHDR_ICMP) {
+               if (likely(transport_len >= sizeof(struct icmp6hdr)))
+                       inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
+                                                 skb, addr, new_addr, 1);
        }
 }
 
@@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a);
-                       if (unlikely(err)) /* skb already freed. */
-                               return err;
                        break;
                }
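
In the update_ipv6_checksum() hunk of this file, switching from IPPROTO_* to NEXTHDR_* is numerically a no-op for TCP and UDP but matters for ICMPv6, whose next-header value has no IPPROTO_ICMP equivalent; the functional change is the new NEXTHDR_ICMP branch. A small standalone check of those values — the underscore-suffixed names are local stand-ins so no system header is needed:

#include <assert.h>

#define IPPROTO_TCP_  6
#define IPPROTO_UDP_  17
#define IPPROTO_ICMP_ 1
#define NEXTHDR_TCP_  6   /* same value as IPPROTO_TCP */
#define NEXTHDR_UDP_  17  /* same value as IPPROTO_UDP */
#define NEXTHDR_ICMP_ 58  /* ICMPv6; unrelated to IPPROTO_ICMP */

int main(void)
{
        assert(NEXTHDR_TCP_ == IPPROTO_TCP_);   /* old code worked... */
        assert(NEXTHDR_UDP_ == IPPROTO_UDP_);   /* ...by numeric luck */
        assert(NEXTHDR_ICMP_ != IPPROTO_ICMP_); /* ICMPv6 was missed */
        return 0;
}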
 
index e6d7255183eba31267ec29705441a019681ca617..f9e556b5608650e490ad1608f5a28280f55d3d08 100644 (file)
@@ -1265,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void)
        return msgsize;
 }
 
-/* Called with ovs_mutex or RCU read lock. */
+/* Called with ovs_mutex. */
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
 {
@@ -1555,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (!reply)
                return -ENOMEM;
 
-       rcu_read_lock();
+       ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp)) {
                err = PTR_ERR(dp);
@@ -1564,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);
-       rcu_read_unlock();
+       ovs_unlock();
 
        return genlmsg_reply(reply, info);
 
 err_unlock_free:
-       rcu_read_unlock();
+       ovs_unlock();
        kfree_skb(reply);
        return err;
 }
@@ -1581,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int skip = cb->args[0];
        int i = 0;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
+       ovs_lock();
+       list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1590,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        break;
                i++;
        }
-       rcu_read_unlock();
+       ovs_unlock();
 
        cb->args[0] = i;
 
index 939bcb32100fe861b4ad94255d191ee281f83ed3..089b195c064ae56445b458e52e0dcaefa90b615c 100644 (file)
@@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match,
        if (match->key->eth.type == htons(ETH_P_ARP)
                        || match->key->eth.type == htons(ETH_P_RARP)) {
                key_expected |= 1 << OVS_KEY_ATTR_ARP;
-               if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+               if (match->mask && (match->mask->key.tp.src == htons(0xff)))
                        mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
        }
 
@@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
                                ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
                        return -EINVAL;
                }
+
+               if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
+                       OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n",
+                                 ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
+                       return -EINVAL;
+               }
+
                SW_FLOW_KEY_PUT(match, ipv6.label,
                                ipv6_key->ipv6_label, is_mask);
                SW_FLOW_KEY_PUT(match, ip.proto,
index 87d20f48ff06195766e8ecd20a3fcfae5ccae690..07c04a841ba09c5ee5b0bc7718d4e6c87ac561e1 100644 (file)
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
                __unregister_prot_hook(sk, sync);
 }
 
-static inline __pure struct page *pgv_to_page(void *addr)
+static inline struct page * __pure pgv_to_page(void *addr)
 {
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
index 0e8529113dc5a009c89bab8be3c87ce0ddbcce49..fb7976aee61c84f38aecdc5c5f0d8be20e577fa9 100644 (file)
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
                list_add(&cur_key->key_list, sh_keys);
 
        cur_key->key = key;
-       sctp_auth_key_hold(key);
-
        return 0;
 nomem:
        if (!replace)
index ab734be8cb209864910f2fd667f2a6a27266f3af..9f32741abb1c7b142265297dc2fac78b74b3d195 100644 (file)
@@ -2609,6 +2609,9 @@ do_addr_param:
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
                af = sctp_get_af_specific(param_type2af(param.p->type));
+               if (af == NULL)
+                       break;
+
                af->from_addr_param(&addr, addr_param,
                                    htons(asoc->peer.port), 0);
 
index afb292cd797decf08561492925d87d34d09e485b..53ed8d3f88970a877b2799e6ecb2dc9de6893304 100644 (file)
@@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred)
        char *string = NULL;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_cl_ctx *ctx;
+       unsigned int len;
        struct xdr_netobj *acceptor;
 
        rcu_read_lock();
@@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred)
        if (!ctx)
                goto out;
 
-       acceptor = &ctx->gc_acceptor;
+       len = ctx->gc_acceptor.len;
+       rcu_read_unlock();
 
        /* no point if there's no string */
-       if (!acceptor->len)
-               goto out;
-
-       string = kmalloc(acceptor->len + 1, GFP_KERNEL);
+       if (!len)
+               return NULL;
+realloc:
+       string = kmalloc(len + 1, GFP_KERNEL);
        if (!string)
+               return NULL;
+
+       rcu_read_lock();
+       ctx = rcu_dereference(gss_cred->gc_ctx);
+
+       /* did the ctx disappear or was it replaced by one with no acceptor? */
+       if (!ctx || !ctx->gc_acceptor.len) {
+               kfree(string);
+               string = NULL;
                goto out;
+       }
+
+       acceptor = &ctx->gc_acceptor;
+
+       /*
+        * Did we find a new acceptor that's longer than the original? Allocate
+        * a longer buffer and try again.
+        */
+       if (len < acceptor->len) {
+               len = acceptor->len;
+               rcu_read_unlock();
+               kfree(string);
+               goto realloc;
+       }
 
        memcpy(string, acceptor->data, acceptor->len);
        string[acceptor->len] = '\0';
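
The gss_stringify_acceptor() rework above follows a common pattern: sample the length under RCU, drop the read lock before allocating (kmalloc with GFP_KERNEL may sleep), then re-take the lock, re-check, and retry with a bigger buffer if the object grew meanwhile. A user-space sketch of the same loop, with a rwlock standing in for RCU and the disappearing-context case omitted:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *shared_acceptor = "nfs@server.example"; /* may change */

static char *copy_acceptor(void)
{
        size_t len;
        char *out;

        pthread_rwlock_rdlock(&lock);
        len = strlen(shared_acceptor);
        pthread_rwlock_unlock(&lock);  /* can't sleep inside the reader */

        if (!len)
                return NULL;
retry:
        out = malloc(len + 1);
        if (!out)
                return NULL;

        pthread_rwlock_rdlock(&lock);
        if (strlen(shared_acceptor) > len) {  /* grew while unlocked */
                len = strlen(shared_acceptor);
                pthread_rwlock_unlock(&lock);
                free(out);
                goto retry;
        }
        memcpy(out, shared_acceptor, strlen(shared_acceptor) + 1);
        pthread_rwlock_unlock(&lock);
        return out;
}

int main(void)
{
        char *s = copy_acceptor();

        printf("%s\n", s ? s : "(none)");
        free(s);
        return 0;
}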
index 3f959c681885ba41ccdf6300bb27a35745ec11be..f9c052d508f008c4fb74567ea8cbad13f305d497 100644 (file)
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        xid = *p++;
        calldir = *p;
 
-       if (bc_xprt)
-               req = xprt_lookup_rqst(bc_xprt, xid);
-
-       if (!req) {
-               printk(KERN_NOTICE
-                       "%s: Got unrecognized reply: "
-                       "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
-                       __func__, ntohl(calldir),
-                       bc_xprt, ntohl(xid));
+       if (!bc_xprt)
                return -EAGAIN;
-       }
+       spin_lock_bh(&bc_xprt->transport_lock);
+       req = xprt_lookup_rqst(bc_xprt, xid);
+       if (!req)
+               goto unlock_notfound;
 
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
        /*
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        dst = &req->rq_private_buf.head[0];
        src = &rqstp->rq_arg.head[0];
        if (dst->iov_len < src->iov_len)
-               return -EAGAIN; /* whatever; just giving up. */
+               goto unlock_eagain; /* whatever; just giving up. */
        memcpy(dst->iov_base, src->iov_base, src->iov_len);
        xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
        rqstp->rq_arg.len = 0;
+       spin_unlock_bh(&bc_xprt->transport_lock);
        return 0;
+unlock_notfound:
+       printk(KERN_NOTICE
+               "%s: Got unrecognized reply: "
+               "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+               __func__, ntohl(calldir),
+               bc_xprt, ntohl(xid));
+unlock_eagain:
+       spin_unlock_bh(&bc_xprt->transport_lock);
+       return -EAGAIN;
 }
 
 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
index b8960c4959a5e53635180054d27d788105ebf134..200e37867336a3c2903437e97f591fbc302e15b7 100644 (file)
@@ -117,6 +117,7 @@ struct keyring_search_context {
 #define KEYRING_SEARCH_NO_UPDATE_TIME  0x0004  /* Don't update times */
 #define KEYRING_SEARCH_NO_CHECK_PERM   0x0008  /* Don't check permissions */
 #define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010  /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED    0x0020  /* Ignore expired keys (intention to replace) */
 
        int (*iterator)(const void *object, void *iterator_data);
 
index eff88a5f5d40da17611d381bcf4e219a90bcc972..4743d71e4aa6dd12f2456a5f00496c1222775c6a 100644 (file)
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+#define KEY_MAX_DESC_SIZE 4096
+
 static int key_get_type_from_user(char *type,
                                  const char __user *_type,
                                  unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
        description = NULL;
        if (_description) {
-               description = strndup_user(_description, PAGE_SIZE);
+               description = strndup_user(_description, KEY_MAX_DESC_SIZE);
                if (IS_ERR(description)) {
                        ret = PTR_ERR(description);
                        goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
                goto error;
 
        /* pull the description into kernel space */
-       description = strndup_user(_description, PAGE_SIZE);
+       description = strndup_user(_description, KEY_MAX_DESC_SIZE);
        if (IS_ERR(description)) {
                ret = PTR_ERR(description);
                goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
        /* fetch the name from userspace */
        name = NULL;
        if (_name) {
-               name = strndup_user(_name, PAGE_SIZE);
+               name = strndup_user(_name, KEY_MAX_DESC_SIZE);
                if (IS_ERR(name)) {
                        ret = PTR_ERR(name);
                        goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
 {
        struct key *key, *instkey;
        key_ref_t key_ref;
-       char *tmpbuf;
+       char *infobuf;
        long ret;
+       int desclen, infolen;
 
        key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
        if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
        }
 
 okay:
-       /* calculate how much description we're going to return */
-       ret = -ENOMEM;
-       tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!tmpbuf)
-               goto error2;
-
        key = key_ref_to_ptr(key_ref);
+       desclen = strlen(key->description);
 
-       ret = snprintf(tmpbuf, PAGE_SIZE - 1,
-                      "%s;%d;%d;%08x;%s",
-                      key->type->name,
-                      from_kuid_munged(current_user_ns(), key->uid),
-                      from_kgid_munged(current_user_ns(), key->gid),
-                      key->perm,
-                      key->description ?: "");
-
-       /* include a NUL char at the end of the data */
-       if (ret > PAGE_SIZE - 1)
-               ret = PAGE_SIZE - 1;
-       tmpbuf[ret] = 0;
-       ret++;
+       /* calculate how much information we're going to return */
+       ret = -ENOMEM;
+       infobuf = kasprintf(GFP_KERNEL,
+                           "%s;%d;%d;%08x;",
+                           key->type->name,
+                           from_kuid_munged(current_user_ns(), key->uid),
+                           from_kgid_munged(current_user_ns(), key->gid),
+                           key->perm);
+       if (!infobuf)
+               goto error2;
+       infolen = strlen(infobuf);
+       ret = infolen + desclen + 1;
 
        /* consider returning the data */
-       if (buffer && buflen > 0) {
-               if (buflen > ret)
-                       buflen = ret;
-
-               if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+       if (buffer && buflen >= ret) {
+               if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+                   copy_to_user(buffer + infolen, key->description,
+                                desclen + 1) != 0)
                        ret = -EFAULT;
        }
 
-       kfree(tmpbuf);
+       kfree(infobuf);
 error2:
        key_ref_put(key_ref);
 error:
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
        if (ret < 0)
                goto error;
 
-       description = strndup_user(_description, PAGE_SIZE);
+       description = strndup_user(_description, KEY_MAX_DESC_SIZE);
        if (IS_ERR(description)) {
                ret = PTR_ERR(description);
                goto error;
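
The keyctl_describe_key() rewrite above keeps the long-standing buffer protocol: the return value is always the size the caller needs, and data is copied only when the supplied buffer is large enough. From user space that supports the usual two-call idiom, sketched here assuming libkeyutils is available (link with -lkeyutils):

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

static int print_description(key_serial_t key)
{
        long len = keyctl_describe(key, NULL, 0);   /* size query */
        char *buf;

        if (len < 0)
                return -1;
        buf = malloc(len);
        if (!buf)
                return -1;
        if (keyctl_describe(key, buf, len) < 0) {   /* actual copy */
                free(buf);
                return -1;
        }
        printf("%s\n", buf);   /* "type;uid;gid;perm;description" */
        free(buf);
        return 0;
}

int main(void)
{
        return print_description(KEY_SPEC_SESSION_KEYRING) ? 1 : 0;
}

Note the inherent race: the description can grow between the two calls, so robust callers loop if the second call reports a larger size.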
index 8177010174f7b3d47773a43e48bf2b171b264c5f..e72548b5897ec237dd7463374871538c81a84fd7 100644 (file)
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
                }
 
                if (key->expiry && ctx->now.tv_sec >= key->expiry) {
-                       ctx->result = ERR_PTR(-EKEYEXPIRED);
+                       if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+                               ctx->result = ERR_PTR(-EKEYEXPIRED);
                        kleave(" = %d [expire]", ctx->skipped_ret);
                        goto skipped;
                }
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
               ctx->index_key.type->name,
               ctx->index_key.description);
 
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+       BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+              (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
        if (ctx->index_key.description)
                ctx->index_key.desc_len = strlen(ctx->index_key.description);
 
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
        if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
            keyring_compare_object(keyring, &ctx->index_key)) {
                ctx->skipped_ret = 2;
-               ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
                switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
                case 1:
                        goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
        }
 
        ctx->skipped_ret = 0;
-       if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
-               ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
 
        /* Start processing a new keyring */
 descend_to_keyring:
index bb4337c7ae1b3978fd5e36d692d8cacf27b89816..0c7aea4dea54d8d299edc09b38c9a8f7b5c82be8 100644 (file)
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+               .flags                  = (KEYRING_SEARCH_DO_STATE_CHECK |
+                                          KEYRING_SEARCH_SKIP_EXPIRED),
        };
        struct key *key;
        key_ref_t key_ref;
index 6639e2cb885322c6a43924496b2a68be25b9a5e6..5d672f7580dd5330516dd62f7d705cac46c319fb 100644 (file)
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+               .flags                  = KEYRING_SEARCH_DO_STATE_CHECK,
        };
        struct key *authkey;
        key_ref_t authkey_ref;
index e66314138b3822036968abb52b672dcfc02c73ee..c603b20356ade4ca57c42a0cc7260f945d2c4ff5 100644 (file)
@@ -4725,9 +4725,10 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
        err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
        if (err) {
                if (err == -EINVAL) {
-                       WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:"
-                                 " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
-                                 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
+                       printk(KERN_WARNING
+                              "SELinux: unrecognized netlink message:"
+                              " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
+                              sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
                        if (!selinux_enforcing || security_get_allow_unknown())
                                err = 0;
                }
index 42ded997b223b7ece3d8535000d4defeea2ba8f5..c6ff94ab1ad65a883e5b969437d05afb837e1a02 100644 (file)
@@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = {
        FORMAT(DSD_U8),
        FORMAT(DSD_U16_LE),
        FORMAT(DSD_U32_LE),
+       FORMAT(DSD_U16_BE),
+       FORMAT(DSD_U32_BE),
 };
 
 const char *snd_pcm_format_name(snd_pcm_format_t format)
index ae7a0feb3b76001f54555187c19343bce352f0c8..ebe8444de6c6ea8f44a5cacfb39b963939d9880d 100644 (file)
@@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
                .width = 32, .phys = 32, .le = 1, .signd = 0,
                .silence = { 0x69, 0x69, 0x69, 0x69 },
        },
+       [SNDRV_PCM_FORMAT_DSD_U16_BE] = {
+               .width = 16, .phys = 16, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69 },
+       },
+       [SNDRV_PCM_FORMAT_DSD_U32_BE] = {
+               .width = 32, .phys = 32, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69, 0x69, 0x69 },
+       },
        /* FIXME: the following three formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
                .le = -1, .signd = -1,
index 9ab1e631cb32244262d9a0cfe5009adfe4677aca..48b6c5a3884f3b1ed729d542fd286ba2840a938b 100644 (file)
@@ -219,6 +219,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, LPT_LP},"
                         "{Intel, WPT_LP},"
                         "{Intel, SPT},"
+                        "{Intel, SPT_LP},"
                         "{Intel, HPT},"
                         "{Intel, PBG},"
                         "{Intel, SCH},"
@@ -297,7 +298,8 @@ enum {
 
 /* quirks for ATI/AMD HDMI */
 #define AZX_DCAPS_PRESET_ATI_HDMI \
-       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\
+        AZX_DCAPS_NO_MSI64)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -1485,6 +1487,7 @@ static int azx_first_init(struct azx *chip)
        struct snd_card *card = chip->card;
        int err;
        unsigned short gcap;
+       unsigned int dma_bits = 64;
 
 #if BITS_PER_LONG != 64
        /* Fix up base address on ULI M5461 */
@@ -1508,9 +1511,14 @@ static int azx_first_init(struct azx *chip)
                return -ENXIO;
        }
 
-       if (chip->msi)
+       if (chip->msi) {
+               if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
+                       dev_dbg(card->dev, "Disabling 64bit MSI\n");
+                       pci->no_64bit_msi = true;
+               }
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
+       }
 
        if (azx_acquire_irq(chip, 0) < 0)
                return -EBUSY;
@@ -1521,9 +1529,14 @@ static int azx_first_init(struct azx *chip)
        gcap = azx_readw(chip, GCAP);
        dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
 
+       /* AMD devices support 40 or 48bit DMA, take the safe one */
+       if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
+               dma_bits = 40;
+
        /* disable SB600 64bit support for safety */
        if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
                struct pci_dev *p_smbus;
+               dma_bits = 40;
                p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
                                         PCI_DEVICE_ID_ATI_SBX00_SMBUS,
                                         NULL);
@@ -1553,9 +1566,11 @@ static int azx_first_init(struct azx *chip)
        }
 
        /* allow 64bit DMA address if supported by H/W */
-       if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
-               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
-       else {
+       if (!(gcap & AZX_GCAP_64OK))
+               dma_bits = 32;
+       if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
+               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
+       } else {
                pci_set_dma_mask(pci, DMA_BIT_MASK(32));
                pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
        }
@@ -2004,6 +2019,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point */
        { PCI_DEVICE(0x8086, 0xa170),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* Sunrise Point-LP */
+       { PCI_DEVICE(0x8086, 0x9d70),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
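
The azx_first_init() hunk above turns the DMA-mask setup into a cascade: start at 64 bits, clamp to 40 on AMD/ATI parts, clamp to 32 when GCAP lacks the 64OK bit, and fall back to 32 if the chosen mask is rejected. Condensed into a standalone sketch — the callback stands in for pci_set_dma_mask(), returning 0 on success:

#include <stdio.h>

/* stand-in for pci_set_dma_mask(); pretend only <=40-bit masks work */
static int accept_up_to_40(unsigned int bits)
{
        return bits <= 40 ? 0 : -1;
}

static unsigned int pick_dma_bits(int is_amd_or_ati, int gcap_64ok,
                                  int (*set_mask)(unsigned int))
{
        unsigned int dma_bits = 64;

        if (is_amd_or_ati)      /* 40-bit is the safe choice here */
                dma_bits = 40;
        if (!gcap_64ok)         /* controller reports no 64-bit support */
                dma_bits = 32;
        if (set_mask(dma_bits) != 0) {  /* rejected: last-resort 32-bit */
                dma_bits = 32;
                set_mask(dma_bits);
        }
        return dma_bits;
}

int main(void)
{
        printf("chosen mask: %u bits\n",
               pick_dma_bits(0, 1, accept_up_to_40)); /* 64 fails -> 32 */
        return 0;
}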
index 949cd437eeb264798aec5d9b2f5c5e61a87fc294..5016014e57f2f1dc65f68d5b71db8d12f065493f 100644 (file)
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
+#define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 
 /* HD Audio class code */
 #define PCI_CLASS_MULTIMEDIA_HD_AUDIO  0x0403
index 71e4bad06345c856fcfb80e9e75578d444db117e..e9ebc7bd752cae1afdf95c0b2d2e0e8a15d2393b 100644 (file)
@@ -43,6 +43,7 @@ struct conexant_spec {
        unsigned int num_eapds;
        hda_nid_t eapds[4];
        bool dynamic_eapd;
+       hda_nid_t mute_led_eapd;
 
        unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
 
@@ -163,6 +164,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled)
        cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled);
 }
 
+/* turn on/off EAPD according to Master switch (inversely!) for mute LED */
+static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled)
+{
+       struct hda_codec *codec = private_data;
+       struct conexant_spec *spec = codec->spec;
+
+       snd_hda_codec_write(codec, spec->mute_led_eapd, 0,
+                           AC_VERB_SET_EAPD_BTLENABLE,
+                           enabled ? 0x00 : 0x02);
+}
+
 static int cx_auto_build_controls(struct hda_codec *codec)
 {
        int err;
@@ -223,6 +235,7 @@ enum {
        CXT_FIXUP_TOSHIBA_P105,
        CXT_FIXUP_HP_530,
        CXT_FIXUP_CAP_MIX_AMP_5047,
+       CXT_FIXUP_MUTE_LED_EAPD,
 };
 
 /* for hda_fixup_thinkpad_acpi() */
@@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec,
        }
 }
 
+static void cxt_fixup_mute_led_eapd(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->mute_led_eapd = 0x1b;
+               spec->dynamic_eapd = 1;
+               spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led;
+       }
+}
+
 /*
  * Fix max input level on mixer widget to 0dB
  * (originally it has 0x2b steps with 0dB offset 0x14)
@@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_cap_mix_amp_5047,
        },
+       [CXT_FIXUP_MUTE_LED_EAPD] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_mute_led_eapd,
+       },
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -762,6 +791,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+       SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
@@ -780,6 +810,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
        { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
+       { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
        {}
 };
 
index c9cf248ce8ecbec0ec6f955da1c8d263aeed170f..b118a5be18df7ea2a9bfeab4997c677e1ae176b5 100644 (file)
@@ -288,21 +288,91 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
        snd_hda_jack_unsol_event(codec, res >> 2);
 }
 
-/* additional initialization for ALC888 variants */
-static void alc888_coef_init(struct hda_codec *codec)
+/* Change EAPD to verb control */
+static void alc_fill_eapd_coef(struct hda_codec *codec)
 {
-       if (alc_get_coef0(codec) == 0x20)
-               /* alc888S-VC */
-               alc_write_coef_idx(codec, 7, 0x830);
-        else
-                /* alc888-VB */
-               alc_write_coef_idx(codec, 7, 0x3030);
+       int coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0262:
+               alc_update_coef_idx(codec, 0x7, 0, 1<<5);
+               break;
+       case 0x10ec0267:
+       case 0x10ec0268:
+               alc_update_coef_idx(codec, 0x7, 0, 1<<13);
+               break;
+       case 0x10ec0269:
+               if ((coef & 0x00f0) == 0x0010)
+                       alc_update_coef_idx(codec, 0xd, 0, 1<<14);
+               if ((coef & 0x00f0) == 0x0020)
+                       alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+               if ((coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               break;
+       case 0x10ec0280:
+       case 0x10ec0284:
+       case 0x10ec0290:
+       case 0x10ec0292:
+               alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+               break;
+       case 0x10ec0233:
+       case 0x10ec0255:
+       case 0x10ec0282:
+       case 0x10ec0283:
+       case 0x10ec0286:
+       case 0x10ec0288:
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               break;
+       case 0x10ec0285:
+       case 0x10ec0293:
+               alc_update_coef_idx(codec, 0xa, 1<<13, 0);
+               break;
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
+               break;
+       case 0x10ec0668:
+               alc_update_coef_idx(codec, 0x7, 3<<13, 0);
+               break;
+       case 0x10ec0867:
+               alc_update_coef_idx(codec, 0x4, 1<<10, 0);
+               break;
+       case 0x10ec0888:
+               if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+               break;
+       case 0x10ec0892:
+               alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+               break;
+       case 0x10ec0899:
+       case 0x10ec0900:
+               alc_update_coef_idx(codec, 0x7, 1<<1, 0);
+               break;
+       }
 }
 
-/* additional initialization for ALC889 variants */
-static void alc889_coef_init(struct hda_codec *codec)
+/* additional initialization for ALC888 variants */
+static void alc888_coef_init(struct hda_codec *codec)
 {
-       alc_update_coef_idx(codec, 7, 0, 0x2010);
+       switch (alc_get_coef0(codec) & 0x00f0) {
+       /* alc888-VA */
+       case 0x00:
+       /* alc888-VB */
+       case 0x10:
+               alc_update_coef_idx(codec, 7, 0, 0x2030); /* Turn EAPD to High */
+               break;
+       }
 }
 
 /* turn on/off EAPD control (only if available) */
@@ -343,6 +413,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
 /* generic EAPD initialization */
 static void alc_auto_init_amp(struct hda_codec *codec, int type)
 {
+       alc_fill_eapd_coef(codec);
        alc_auto_setup_eapd(codec, true);
        switch (type) {
        case ALC_INIT_GPIO1:
@@ -359,25 +430,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
                case 0x10ec0260:
                        alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010);
                        break;
-               case 0x10ec0262:
                case 0x10ec0880:
                case 0x10ec0882:
                case 0x10ec0883:
                case 0x10ec0885:
-               case 0x10ec0887:
-               /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
-               case 0x10ec0900:
-                       alc889_coef_init(codec);
+                       alc_update_coef_idx(codec, 7, 0, 0x2030);
                        break;
                case 0x10ec0888:
                        alc888_coef_init(codec);
                        break;
-#if 0 /* XXX: This may cause the silent output on speaker on some machines */
-               case 0x10ec0267:
-               case 0x10ec0268:
-                       alc_update_coef_idx(codec, 7, 0, 0x3000);
-                       break;
-#endif /* XXX */
                }
                break;
        }
@@ -1710,7 +1771,7 @@ static void alc889_fixup_coef(struct hda_codec *codec,
 {
        if (action != HDA_FIXUP_ACT_INIT)
                return;
-       alc889_coef_init(codec);
+       alc_update_coef_idx(codec, 7, 0, 0x2030);
 }
 
 /* toggle speaker-output according to the hp-jack state */
@@ -3350,6 +3411,27 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
        }
 }
 
+static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
+                                  const struct hda_fixup *fix, int action)
+{
+       /* Like hp_gpio_mic1_led, but also needs GPIO4 low to enable headphone amp */
+       struct alc_spec *spec = codec->spec;
+       static const struct hda_verb gpio_init[] = {
+               { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
+               { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
+               {}
+       };
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
+               spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
+               spec->gpio_led = 0;
+               spec->cap_mute_led_nid = 0x18;
+               snd_hda_add_verbs(codec, gpio_init);
+               codec->power_filter = led_power_filter;
+       }
+}
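
The gpio_init verb table in the fixup above programs the codec's audio
function group (NID 0x01): mask 0x18 exposes GPIO3 and GPIO4 (bits 3 and 4),
and the DIRECTION verb makes both outputs, so with spec->gpio_led left at 0
GPIO4 stays driven low, which per the comment enables the headphone amp.
Expressed as direct verb writes (illustrative only; the fixup achieves the
same via snd_hda_add_verbs()):

static void alc280_gpio4_init_sketch(struct hda_codec *codec)
{
	/* make GPIO3/GPIO4 controllable by the driver */
	snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK, 0x18);
	/* configure both pins as outputs */
	snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION, 0x18);
}
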
+
 static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -4217,6 +4299,7 @@ enum {
        ALC283_FIXUP_BXBT2807_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
+       ALC280_FIXUP_HP_GPIO4,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -4437,6 +4520,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC269_FIXUP_HEADSET_MODE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
        },
        [ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
                .type = HDA_FIXUP_FUNC,
@@ -4626,6 +4711,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC255_FIXUP_HEADSET_MODE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode_alc255,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
        },
        [ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
                .type = HDA_FIXUP_FUNC,
@@ -4661,8 +4748,6 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_dell_wmi,
-               .chained_before = true,
-               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
        },
        [ALC282_FIXUP_ASPIRE_V5_PINS] = {
                .type = HDA_FIXUP_PINS,
@@ -4680,7 +4765,10 @@ static const struct hda_fixup alc269_fixups[] = {
                        { },
                },
        },
-
+       [ALC280_FIXUP_HP_GPIO4] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc280_fixup_hp_gpio4,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -4697,13 +4785,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
        SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
-       SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -4728,21 +4816,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x8004, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        /* ALC290 */
        SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2258, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4751,7 +4833,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2277, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4804,7 +4885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
-       SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
        SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4984,6 +5065,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+               {0x12, 0x90a60130},
+               {0x13, 0x40000000},
+               {0x14, 0x90170110},
+               {0x15, 0x0421101f},
+               {0x16, 0x411111f0},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x04a11020},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40748605},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
                {0x12, 0x90a60140},
                {0x13, 0x40000000},
@@ -5194,9 +5288,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
                }
        }
 
-       /* Class D */
-       alc_update_coef_idx(codec, 0xd, 0, 1<<14);
-
        /* HP */
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
@@ -5651,6 +5742,35 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
        }
 }
 
+static struct coef_fw alc668_coefs[] = {
+       WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
+       WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
+       WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b,    0x0),
+       WRITE_COEF(0x0c, 0x7cf7), WRITE_COEF(0x0d, 0x1080), WRITE_COEF(0x0e, 0x7f7f),
+       WRITE_COEF(0x0f, 0xcccc), WRITE_COEF(0x10, 0xddcc), WRITE_COEF(0x11, 0x0001),
+       WRITE_COEF(0x13,    0x0), WRITE_COEF(0x14, 0x2aa0), WRITE_COEF(0x17, 0xa940),
+       WRITE_COEF(0x19,    0x0), WRITE_COEF(0x1a,    0x0), WRITE_COEF(0x1b,    0x0),
+       WRITE_COEF(0x1c,    0x0), WRITE_COEF(0x1d,    0x0), WRITE_COEF(0x1e, 0x7418),
+       WRITE_COEF(0x1f, 0x0804), WRITE_COEF(0x20, 0x4200), WRITE_COEF(0x21, 0x0468),
+       WRITE_COEF(0x22, 0x8ccc), WRITE_COEF(0x23, 0x0250), WRITE_COEF(0x24, 0x7418),
+       WRITE_COEF(0x27,    0x0), WRITE_COEF(0x28, 0x8ccc), WRITE_COEF(0x2a, 0xff00),
+       WRITE_COEF(0x2b, 0x8000), WRITE_COEF(0xa7, 0xff00), WRITE_COEF(0xa8, 0x8000),
+       WRITE_COEF(0xaa, 0x2e17), WRITE_COEF(0xab, 0xa0c0), WRITE_COEF(0xac,    0x0),
+       WRITE_COEF(0xad,    0x0), WRITE_COEF(0xae, 0x2ac6), WRITE_COEF(0xaf, 0xa480),
+       WRITE_COEF(0xb0,    0x0), WRITE_COEF(0xb1,    0x0), WRITE_COEF(0xb2,    0x0),
+       WRITE_COEF(0xb3,    0x0), WRITE_COEF(0xb4,    0x0), WRITE_COEF(0xb5, 0x1040),
+       WRITE_COEF(0xb6, 0xd697), WRITE_COEF(0xb7, 0x902b), WRITE_COEF(0xb8, 0xd697),
+       WRITE_COEF(0xb9, 0x902b), WRITE_COEF(0xba, 0xb8ba), WRITE_COEF(0xbb, 0xaaab),
+       WRITE_COEF(0xbc, 0xaaaf), WRITE_COEF(0xbd, 0x6aaa), WRITE_COEF(0xbe, 0x1c02),
+       WRITE_COEF(0xc0, 0x00ff), WRITE_COEF(0xc1, 0x0fa6),
+       {}
+};
+
+static void alc668_restore_default_value(struct hda_codec *codec)
+{
+       alc_process_coef_fw(codec, alc668_coefs);
+}
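
Tables like alc668_coefs above are consumed by alc_process_coef_fw(), which
walks the entries until the empty {} terminator. A sketch of that loop,
assuming struct coef_fw carries a codec NID, a coefficient index, a mask and
a value, with WRITE_COEF() expanding to an all-ones mask (names follow the
driver; the body is illustrative):

static void alc_process_coef_fw(struct hda_codec *codec,
				const struct coef_fw *fw)
{
	/* the zeroed {} entry (nid == 0) ends the table */
	for (; fw->nid; fw++) {
		if (fw->mask == (unsigned short)-1)
			/* WRITE_COEF entry: unconditional write */
			alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
		else
			/* UPDATE_COEF entry: read-modify-write */
			alc_update_coefex_idx(codec, fw->nid, fw->idx,
					      fw->mask, fw->val);
	}
}
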
+
 enum {
        ALC662_FIXUP_ASPIRE,
        ALC662_FIXUP_LED_GPIO1,
@@ -6077,29 +6197,6 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
        {}
 };
 
-static void alc662_fill_coef(struct hda_codec *codec)
-{
-       int coef;
-
-       coef = alc_get_coef0(codec);
-
-       switch (codec->vendor_id) {
-       case 0x10ec0662:
-               if ((coef & 0x00f0) == 0x0030)
-                       alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
-               break;
-       case 0x10ec0272:
-       case 0x10ec0273:
-       case 0x10ec0663:
-       case 0x10ec0665:
-       case 0x10ec0670:
-       case 0x10ec0671:
-       case 0x10ec0672:
-               alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
-               break;
-       }
-}
-
 /*
  */
 static int patch_alc662(struct hda_codec *codec)
@@ -6118,8 +6215,11 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
-       spec->init_hook = alc662_fill_coef;
-       alc662_fill_coef(codec);
+       switch (codec->vendor_id) {
+       case 0x10ec0668:
+               spec->init_hook = alc668_restore_default_value;
+               break;
+       }
 
        snd_hda_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
index cee51ae177c1e219325231eb517b91784ac258fe..c40428f25ba5c9b9b3d2e5e657a71020702dc12b 100644 (file)
@@ -46,6 +46,7 @@ static struct i2c_driver cs42l51_i2c_driver = {
        .driver = {
                .name = "cs42l51",
                .owner = THIS_MODULE,
+               .of_match_table = cs42l51_of_match,
        },
        .probe = cs42l51_i2c_probe,
        .remove = cs42l51_i2c_remove,
index 09488d97de60d040bbdb37edfa3a94a999930633..669c38fc303468d51dd6bbd3dfa16e935c030e8c 100644 (file)
@@ -558,11 +558,13 @@ error:
 }
 EXPORT_SYMBOL_GPL(cs42l51_probe);
 
-static const struct of_device_id cs42l51_of_match[] = {
+const struct of_device_id cs42l51_of_match[] = {
        { .compatible = "cirrus,cs42l51", },
        { }
 };
 MODULE_DEVICE_TABLE(of, cs42l51_of_match);
+EXPORT_SYMBOL_GPL(cs42l51_of_match);
+
 MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
 MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver");
 MODULE_LICENSE("GPL");
index 8c55bf384bc65189545807d9ce9278d6c66670f2..0ca805492ac4b77d110dc24b378845fb5b3768a6 100644 (file)
@@ -22,6 +22,7 @@ struct device;
 
 extern const struct regmap_config cs42l51_regmap;
 int cs42l51_probe(struct device *dev, struct regmap *regmap);
+extern const struct of_device_id cs42l51_of_match[];
 
 #define CS42L51_CHIP_ID                        0x1B
 #define CS42L51_CHIP_REV_A             0x00
index aae410d122ee1f8f7cb99fbd416fd02c0f17fdfd..2d05b5d3a6ce7fdc3531f5c322853dbb1a0ca63e 100644 (file)
@@ -19,7 +19,7 @@
 #include "es8328.h"
 
 static const struct i2c_device_id es8328_id[] = {
-       { "everest,es8328", 0 },
+       { "es8328", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, es8328_id);
index d519294f57c79c289784f9a71bb43d7bf51dc7e9..1229554f1464d1e1c37a1d82d227a5a6347e377d 100644 (file)
@@ -1941,13 +1941,13 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
         *               0x02 (when master clk is 20MHz to 40MHz).
         *               0x03 (when master clk is 40MHz to 60MHz).
         */
-       if ((freq >= 10000000) && (freq < 20000000)) {
+       if ((freq >= 10000000) && (freq <= 20000000)) {
                snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
                        M98090_PSCLK_DIV1);
-       } else if ((freq >= 20000000) && (freq < 40000000)) {
+       } else if ((freq > 20000000) && (freq <= 40000000)) {
                snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
                        M98090_PSCLK_DIV2);
-       } else if ((freq >= 40000000) && (freq < 60000000)) {
+       } else if ((freq > 40000000) && (freq <= 60000000)) {
                snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
                        M98090_PSCLK_DIV4);
        } else {
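
Read against the comment above, the corrected comparisons make each upper
bound inclusive: an exact 20 MHz or 40 MHz master clock now takes the smaller
prescaler, and 60 MHz is no longer rejected. A hypothetical standalone helper
(not part of the driver) expressing the same bucketing:

static int max98090_psclk_bits(unsigned int freq)
{
	if (freq >= 10000000 && freq <= 20000000)
		return M98090_PSCLK_DIV1;	/* 10-20 MHz */
	if (freq > 20000000 && freq <= 40000000)
		return M98090_PSCLK_DIV2;	/* >20-40 MHz */
	if (freq > 40000000 && freq <= 60000000)
		return M98090_PSCLK_DIV4;	/* >40-60 MHz */
	return -EINVAL;				/* outside the PLL's range */
}
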
index 3fb83bf09768347f1bcd469d2be9be4b56ea62e2..d16331e0b64d4532647b5f4eb4d4abe58a4d66ec 100644 (file)
@@ -139,6 +139,7 @@ static const struct reg_default rt5645_reg[] = {
        { 0x76, 0x000a },
        { 0x77, 0x0c00 },
        { 0x78, 0x0000 },
+       { 0x79, 0x0123 },
        { 0x80, 0x0000 },
        { 0x81, 0x0000 },
        { 0x82, 0x0000 },
@@ -334,6 +335,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
        case RT5645_DMIC_CTRL2:
        case RT5645_TDM_CTRL_1:
        case RT5645_TDM_CTRL_2:
+       case RT5645_TDM_CTRL_3:
        case RT5645_GLB_CLK:
        case RT5645_PLL_CTRL1:
        case RT5645_PLL_CTRL2:
index ba9d9b4d485783c80efa30e13e3218f1fec7742e..9bd8b4f633032c713a432ca3af1bc38aa3c81f16 100644 (file)
@@ -100,18 +100,18 @@ static const struct reg_default rt5670_reg[] = {
        { 0x4c, 0x5380 },
        { 0x4f, 0x0073 },
        { 0x52, 0x00d3 },
-       { 0x53, 0xf0f0 },
+       { 0x53, 0xf000 },
        { 0x61, 0x0000 },
        { 0x62, 0x0001 },
        { 0x63, 0x00c3 },
        { 0x64, 0x0000 },
-       { 0x65, 0x0000 },
+       { 0x65, 0x0001 },
        { 0x66, 0x0000 },
        { 0x6f, 0x8000 },
        { 0x70, 0x8000 },
        { 0x71, 0x8000 },
        { 0x72, 0x8000 },
-       { 0x73, 0x1110 },
+       { 0x73, 0x7770 },
        { 0x74, 0x0e00 },
        { 0x75, 0x1505 },
        { 0x76, 0x0015 },
@@ -125,21 +125,21 @@ static const struct reg_default rt5670_reg[] = {
        { 0x83, 0x0000 },
        { 0x84, 0x0000 },
        { 0x85, 0x0000 },
-       { 0x86, 0x0008 },
+       { 0x86, 0x0004 },
        { 0x87, 0x0000 },
        { 0x88, 0x0000 },
        { 0x89, 0x0000 },
        { 0x8a, 0x0000 },
        { 0x8b, 0x0000 },
-       { 0x8c, 0x0007 },
+       { 0x8c, 0x0003 },
        { 0x8d, 0x0000 },
        { 0x8e, 0x0004 },
        { 0x8f, 0x1100 },
        { 0x90, 0x0646 },
        { 0x91, 0x0c06 },
        { 0x93, 0x0000 },
-       { 0x94, 0x0000 },
-       { 0x95, 0x0000 },
+       { 0x94, 0x1270 },
+       { 0x95, 0x1000 },
        { 0x97, 0x0000 },
        { 0x98, 0x0000 },
        { 0x99, 0x0000 },
@@ -150,11 +150,11 @@ static const struct reg_default rt5670_reg[] = {
        { 0x9e, 0x0400 },
        { 0xae, 0x7000 },
        { 0xaf, 0x0000 },
-       { 0xb0, 0x6000 },
+       { 0xb0, 0x7000 },
        { 0xb1, 0x0000 },
        { 0xb2, 0x0000 },
        { 0xb3, 0x001f },
-       { 0xb4, 0x2206 },
+       { 0xb4, 0x220c },
        { 0xb5, 0x1f00 },
        { 0xb6, 0x0000 },
        { 0xb7, 0x0000 },
@@ -171,25 +171,25 @@ static const struct reg_default rt5670_reg[] = {
        { 0xcf, 0x1813 },
        { 0xd0, 0x0690 },
        { 0xd1, 0x1c17 },
-       { 0xd3, 0xb320 },
+       { 0xd3, 0xa220 },
        { 0xd4, 0x0000 },
        { 0xd6, 0x0400 },
        { 0xd9, 0x0809 },
        { 0xda, 0x0000 },
        { 0xdb, 0x0001 },
        { 0xdc, 0x0049 },
-       { 0xdd, 0x0009 },
+       { 0xdd, 0x0024 },
        { 0xe6, 0x8000 },
        { 0xe7, 0x0000 },
-       { 0xec, 0xb300 },
+       { 0xec, 0xa200 },
        { 0xed, 0x0000 },
-       { 0xee, 0xb300 },
+       { 0xee, 0xa200 },
        { 0xef, 0x0000 },
        { 0xf8, 0x0000 },
        { 0xf9, 0x0000 },
        { 0xfa, 0x8010 },
        { 0xfb, 0x0033 },
-       { 0xfc, 0x0080 },
+       { 0xfc, 0x0100 },
 };
 
 static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
@@ -1877,6 +1877,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
        { "DAC1 MIXR", "DAC1 Switch", "DAC1 R Mux" },
        { "DAC1 MIXR", NULL, "DAC Stereo1 Filter" },
 
+       { "DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll },
+       { "DAC Mono Left Filter", NULL, "PLL1", is_sys_clk_from_pll },
+       { "DAC Mono Right Filter", NULL, "PLL1", is_sys_clk_from_pll },
+
        { "DAC MIX", NULL, "DAC1 MIXL" },
        { "DAC MIX", NULL, "DAC1 MIXR" },
 
@@ -1926,14 +1930,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
 
        { "DAC L1", NULL, "DAC L1 Power" },
        { "DAC L1", NULL, "Stereo DAC MIXL" },
-       { "DAC L1", NULL, "PLL1", is_sys_clk_from_pll },
        { "DAC R1", NULL, "DAC R1 Power" },
        { "DAC R1", NULL, "Stereo DAC MIXR" },
-       { "DAC R1", NULL, "PLL1", is_sys_clk_from_pll },
        { "DAC L2", NULL, "Mono DAC MIXL" },
-       { "DAC L2", NULL, "PLL1", is_sys_clk_from_pll },
        { "DAC R2", NULL, "Mono DAC MIXR" },
-       { "DAC R2", NULL, "PLL1", is_sys_clk_from_pll },
 
        { "OUT MIXL", "BST1 Switch", "BST1" },
        { "OUT MIXL", "INL Switch", "INL VOL" },
index 6bb77d76561b8964303275955d1e1beb579ba859..dab9b15304af829a510742ec4a4827bdca90a453 100644 (file)
@@ -1299,8 +1299,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
 
        /* enable small pop, introduce 400ms delay in turning off */
        snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
-                               SGTL5000_SMALL_POP,
-                               SGTL5000_SMALL_POP);
+                               SGTL5000_SMALL_POP, 1);
 
        /* disable short cut detector */
        snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
index 2f8c88931f690579f86d497d03ef7327a156a94c..bd7a344bf8c517edf69af2fda9e06e08488e47e0 100644 (file)
 #define SGTL5000_BIAS_CTRL_MASK                        0x000e
 #define SGTL5000_BIAS_CTRL_SHIFT               1
 #define SGTL5000_BIAS_CTRL_WIDTH               3
-#define SGTL5000_SMALL_POP                     0x0001
+#define SGTL5000_SMALL_POP                     0
 
 /*
  * SGTL5000_CHIP_MIC_CTRL
index f412a9911a75d2fe531930f6a0108fb977c41e77..67124783558a5374b547f26d30d1ccc2bdc16ae4 100644 (file)
@@ -1355,6 +1355,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                          file, blocks, pos - firmware->size);
 
 out_fw:
+       regmap_async_complete(regmap);
        release_firmware(firmware);
        wm_adsp_buf_free(&buf_list);
 out:
index ed866e9a2928c4ca5415571fc757f6ba588b636f..9deabdd2b1a29adf3d27335557cd1cc23fee11f4 100644 (file)
@@ -684,12 +684,38 @@ static bool fsl_asrc_writeable_reg(struct device *dev, unsigned int reg)
        }
 }
 
+static struct reg_default fsl_asrc_reg[] = {
+       { REG_ASRCTR, 0x0000 }, { REG_ASRIER, 0x0000 },
+       { REG_ASRCNCR, 0x0000 }, { REG_ASRCFG, 0x0000 },
+       { REG_ASRCSR, 0x0000 }, { REG_ASRCDR1, 0x0000 },
+       { REG_ASRCDR2, 0x0000 }, { REG_ASRSTR, 0x0000 },
+       { REG_ASRRA, 0x0000 }, { REG_ASRRB, 0x0000 },
+       { REG_ASRRC, 0x0000 }, { REG_ASRPM1, 0x0000 },
+       { REG_ASRPM2, 0x0000 }, { REG_ASRPM3, 0x0000 },
+       { REG_ASRPM4, 0x0000 }, { REG_ASRPM5, 0x0000 },
+       { REG_ASRTFR1, 0x0000 }, { REG_ASRCCR, 0x0000 },
+       { REG_ASRDIA, 0x0000 }, { REG_ASRDOA, 0x0000 },
+       { REG_ASRDIB, 0x0000 }, { REG_ASRDOB, 0x0000 },
+       { REG_ASRDIC, 0x0000 }, { REG_ASRDOC, 0x0000 },
+       { REG_ASRIDRHA, 0x0000 }, { REG_ASRIDRLA, 0x0000 },
+       { REG_ASRIDRHB, 0x0000 }, { REG_ASRIDRLB, 0x0000 },
+       { REG_ASRIDRHC, 0x0000 }, { REG_ASRIDRLC, 0x0000 },
+       { REG_ASR76K, 0x0A47 }, { REG_ASR56K, 0x0DF3 },
+       { REG_ASRMCRA, 0x0000 }, { REG_ASRFSTA, 0x0000 },
+       { REG_ASRMCRB, 0x0000 }, { REG_ASRFSTB, 0x0000 },
+       { REG_ASRMCRC, 0x0000 }, { REG_ASRFSTC, 0x0000 },
+       { REG_ASRMCR1A, 0x0000 }, { REG_ASRMCR1B, 0x0000 },
+       { REG_ASRMCR1C, 0x0000 },
+};
+
 static const struct regmap_config fsl_asrc_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
 
        .max_register = REG_ASRMCR1C,
+       .reg_defaults = fsl_asrc_reg,
+       .num_reg_defaults = ARRAY_SIZE(fsl_asrc_reg),
        .readable_reg = fsl_asrc_readable_reg,
        .volatile_reg = fsl_asrc_volatile_reg,
        .writeable_reg = fsl_asrc_writeable_reg,
index f373e37f83050a246c2d35fbed008db7cdd0b5a5..c74ba37f862c121b1114edd84bded49ec7d72907 100644 (file)
@@ -154,8 +154,10 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
                        while (val) {
                                regmap_read(i2s->regmap, I2S_CLR, &val);
                                retry--;
-                               if (!retry)
+                               if (!retry) {
                                        dev_warn(i2s->dev, "fail to clear\n");
+                                       break;
+                               }
                        }
                }
        }
index 0acf5d0eed53a30979f66d466252b51f660f3d2e..72118a77dd5b70aa2a08fb51c32b5204063829f5 100644 (file)
@@ -110,6 +110,7 @@ static const struct of_device_id snow_of_match[] = {
        { .compatible = "google,snow-audio-max98095", },
        {},
 };
+MODULE_DEVICE_TABLE(of, snow_of_match);
 
 static struct platform_driver snow_driver = {
        .driver = {
index 66fddec9543d0ecc1bb414871d035bc244abc521..88e5df474ccf0ed089e23d9fe00f1268189886c9 100644 (file)
@@ -1711,8 +1711,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
 static struct snd_pcm_hardware fsi_pcm_hardware = {
        .info =         SNDRV_PCM_INFO_INTERLEAVED      |
                        SNDRV_PCM_INFO_MMAP             |
-                       SNDRV_PCM_INFO_MMAP_VALID       |
-                       SNDRV_PCM_INFO_PAUSE,
+                       SNDRV_PCM_INFO_MMAP_VALID,
        .buffer_bytes_max       = 64 * 1024,
        .period_bytes_min       = 32,
        .period_bytes_max       = 8192,
index 1922ec57d10a1fd29c174295adb6de5446e8d7a2..70042197f9e26eaf6f95f5f429435388b12976b4 100644 (file)
@@ -886,8 +886,7 @@ static int rsnd_dai_probe(struct platform_device *pdev,
 static struct snd_pcm_hardware rsnd_pcm_hardware = {
        .info =         SNDRV_PCM_INFO_INTERLEAVED      |
                        SNDRV_PCM_INFO_MMAP             |
-                       SNDRV_PCM_INFO_MMAP_VALID       |
-                       SNDRV_PCM_INFO_PAUSE,
+                       SNDRV_PCM_INFO_MMAP_VALID,
        .buffer_bytes_max       = 64 * 1024,
        .period_bytes_min       = 32,
        .period_bytes_max       = 8192,
index 4c8f8a23a0e9de132827fc041438fc1e07a4892c..b60ff56ebc0f56c2199a3473521fa657d9436713 100644 (file)
@@ -884,7 +884,7 @@ static struct snd_soc_dai *snd_soc_find_dai(
        list_for_each_entry(component, &component_list, list) {
                if (dlc->of_node && component->dev->of_node != dlc->of_node)
                        continue;
-               if (dlc->name && strcmp(dev_name(component->dev), dlc->name))
+               if (dlc->name && strcmp(component->name, dlc->name))
                        continue;
                list_for_each_entry(dai, &component->dai_list, list) {
                        if (dlc->dai_name && strcmp(dai->name, dlc->dai_name))
index 002311afdeaa8bd6ceb40d120b0c6afaa13c6119..57277dd79e112c2b59bf2ee3925bae6228bc30ac 100644 (file)
@@ -1522,13 +1522,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
                dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
 }
 
+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
+
+/* Set FE's runtime_update state; the state is protected via the PCM stream
+ * lock to avoid racing with the trigger callback.
+ * If the state is being cleared and a trigger was left pending during the
+ * previous operation, process the pending trigger action here.
+ */
+static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
+                                    int stream, enum snd_soc_dpcm_update state)
+{
+       struct snd_pcm_substream *substream =
+               snd_soc_dpcm_get_substream(fe, stream);
+
+       snd_pcm_stream_lock_irq(substream);
+       if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
+               dpcm_fe_dai_do_trigger(substream,
+                                      fe->dpcm[stream].trigger_pending - 1);
+               fe->dpcm[stream].trigger_pending = 0;
+       }
+       fe->dpcm[stream].runtime_update = state;
+       snd_pcm_stream_unlock_irq(substream);
+}
+
 static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
 {
        struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
        struct snd_pcm_runtime *runtime = fe_substream->runtime;
        int stream = fe_substream->stream, ret = 0;
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
        ret = dpcm_be_dai_startup(fe, fe_substream->stream);
        if (ret < 0) {
@@ -1550,13 +1573,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
        dpcm_set_fe_runtime(fe_substream);
        snd_pcm_limit_hw_rates(runtime);
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
        return 0;
 
 unwind:
        dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
 be_err:
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
        return ret;
 }
 
@@ -1603,7 +1626,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
        struct snd_soc_pcm_runtime *fe = substream->private_data;
        int stream = substream->stream;
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
        /* shutdown the BEs */
        dpcm_be_dai_shutdown(fe, substream->stream);
@@ -1617,7 +1640,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
        dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
 
        fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
        return 0;
 }
 
@@ -1665,7 +1688,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
        int err, stream = substream->stream;
 
        mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
        dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
 
@@ -1680,7 +1703,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
        err = dpcm_be_dai_hw_free(fe, stream);
 
        fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 
        mutex_unlock(&fe->card->mutex);
        return 0;
@@ -1773,7 +1796,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
        int ret, stream = substream->stream;
 
        mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
        memcpy(&fe->dpcm[substream->stream].hw_params, params,
                        sizeof(struct snd_pcm_hw_params));
@@ -1796,7 +1819,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
                fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
 
 out:
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
        mutex_unlock(&fe->card->mutex);
        return ret;
 }
@@ -1910,7 +1933,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
 }
 EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
 
-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
 {
        struct snd_soc_pcm_runtime *fe = substream->private_data;
        int stream = substream->stream, ret;
@@ -1984,6 +2007,23 @@ out:
        return ret;
 }
 
+static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_soc_pcm_runtime *fe = substream->private_data;
+       int stream = substream->stream;
+
+       /* if FE's runtime_update is already set, we're in a race;
+        * defer this trigger and process it when the update finishes
+        * (stored as cmd + 1 so that 0 can keep meaning "none pending")
+        */
+       if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
+               fe->dpcm[stream].trigger_pending = cmd + 1;
+               return 0; /* delayed, assuming it's successful */
+       }
+
+       /* we're alone, let's trigger */
+       return dpcm_fe_dai_do_trigger(substream, cmd);
+}
+
 int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm;
@@ -2027,7 +2067,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
 
        dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
        /* there is no point preparing this FE if there are no BEs */
        if (list_empty(&fe->dpcm[stream].be_clients)) {
@@ -2054,7 +2094,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
        fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
 
 out:
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
        mutex_unlock(&fe->card->mutex);
 
        return ret;
@@ -2201,11 +2241,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
 {
        int ret;
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
        ret = dpcm_run_update_startup(fe, stream);
        if (ret < 0)
                dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 
        return ret;
 }
@@ -2214,11 +2254,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
 {
        int ret;
 
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
        ret = dpcm_run_update_shutdown(fe, stream);
        if (ret < 0)
                dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
-       fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+       dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 
        return ret;
 }
index 7ecd0e8a5c51ba435e590090a52259fe0be35561..f61ebb17cc64e3dd33aadd41185e00ea460ebbe9 100644 (file)
@@ -591,18 +591,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 {
        struct snd_card *card;
        struct list_head *p;
+       bool was_shutdown;
 
        if (chip == (void *)-1L)
                return;
 
        card = chip->card;
        down_write(&chip->shutdown_rwsem);
+       was_shutdown = chip->shutdown;
        chip->shutdown = 1;
        up_write(&chip->shutdown_rwsem);
 
        mutex_lock(&register_mutex);
-       chip->num_interfaces--;
-       if (chip->num_interfaces <= 0) {
+       if (!was_shutdown) {
                struct snd_usb_endpoint *ep;
 
                snd_card_disconnect(card);
@@ -622,6 +623,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                list_for_each(p, &chip->mixer_list) {
                        snd_usb_mixer_disconnect(p);
                }
+       }
+
+       chip->num_interfaces--;
+       if (chip->num_interfaces <= 0) {
                usb_chip[chip->index] = NULL;
                mutex_unlock(&register_mutex);
                snd_card_free_when_closed(card);
index 2e4a9dbc51faeca29529da8d231d779a10b92aed..6e354d3268585bfa38e2e35e4b1c6382ceb10067 100644 (file)
@@ -2033,10 +2033,11 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        cval->res = 1;
        cval->initialized = 1;
 
-       if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
-               cval->control = UAC2_CX_CLOCK_SELECTOR;
-       else
+       if (state->mixer->protocol == UAC_VERSION_1)
                cval->control = 0;
+       else /* UAC_VERSION_2 */
+               cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
+                       UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;
 
        namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
        if (!namelist) {
index f119a41ed9a94986cc1ec1ccd99b1aac3c400260..8c9bf4b7aaf0e003db413347efe1ca2ae09053fe 100644 (file)
@@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
        if (mixer->chip->shutdown)
                ret = -ENODEV;
        else
-               ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+               ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
                                  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                  0, wIndex,
-                                 &tmp, sizeof(tmp), 1000);
+                                 &tmp, sizeof(tmp));
        up_read(&mixer->chip->shutdown_rwsem);
 
        if (ret < 0) {
@@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
        return changed;
 }
 
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
+{
+       kfree((void *)kctl->private_value);
+}
+
 static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
        int validx, int bUnitID)
 {
@@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
 
+       kctl->private_free = kctl_private_value_free;
        err = snd_ctl_add(mixer->chip->card, kctl);
        if (err < 0)
                return err;
index d2aa45a8d89546b378fa05fc94750a35f6fe5c18..60dfe0d28771bbc244ae8b4e41b737d8281c6f04 100644 (file)
@@ -1146,6 +1146,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
        if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(20);
+
+       /* Marantz/Denon devices with USB DAC functionality need a delay
+        * after each class-compliant request
+        */
+       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+
+               switch (le16_to_cpu(dev->descriptor.idProduct)) {
+               case 0x3005: /* Marantz HD-DAC1 */
+               case 0x3006: /* Marantz SA-14S1 */
+                       mdelay(20);
+                       break;
+               }
+       }
 }
 
 /*
@@ -1179,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x3008):
                if (fp->altsetting == 2)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2009):
                if (fp->altsetting == 3)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        default:
                break;
index a8f81c782856270ff559dcf7d48d52aec182df31..515247601df4c783f576722cdd0261d41dfe9886 100755 (executable)
@@ -82,7 +82,7 @@ parse_opts() { # opts
 }
 
 # Parameters
-DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' '`
+DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' ' | head -1`
 TRACING_DIR=$DEBUGFS_DIR/tracing
 TOP_DIR=`absdir $0`
 TEST_DIR=$TOP_DIR/test.d
index 57b9c2b7c4ffbfb5950af351f9157e97a075caac..6f6733331d9597918353464a71af1e9d9e04a251 100644 (file)
@@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
        struct tpacket2_hdr *header = ring;
        int count = 0;
 
-       while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+       while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
                count++;
                header = ring + (count * getpagesize());
        }
index 3aaca49de3257eed0bd905ac26112cacd49588dd..aacdb59f30dedcd780ee29e2903d928194a42a5c 100644 (file)
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-       int i, vcpu_lock_idx = -1, ret = 0;
+       int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
 
        mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
+       ret = -EBUSY;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
        }
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (vcpu->arch.has_run_once) {
-                       ret = -EBUSY;
+               if (vcpu->arch.has_run_once)
                        goto out_unlock;
-               }
        }
+       ret = 0;
 
        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
index 25ffac9e947d9d3e2d554e6c351dfa51811c0354..3cee7b167052b58e07c147abb65985865e39e0f9 100644 (file)
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+               return PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
        else if ((vma->vm_flags & VM_PFNMAP)) {
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-               BUG_ON(!kvm_is_mmio_pfn(pfn));
+               BUG_ON(!kvm_is_reserved_pfn(pfn));
        } else {
                if (async && vma_is_valid(vma, write_fault))
                        *async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
        if (is_error_noslot_pfn(pfn))
                return KVM_ERR_PTR_BAD_PAGE;
 
-       if (kvm_is_mmio_pfn(pfn)) {
+       if (kvm_is_reserved_pfn(pfn)) {
                WARN_ON(1);
                return KVM_ERR_PTR_BAD_PAGE;
        }
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn)) {
+       if (!kvm_is_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);