]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge commit '9f12600fe425bc28f0ccba034a77783c09c15af4' into for-linus
authorAl Viro <viro@zeniv.linux.org.uk>
Thu, 12 Jun 2014 04:27:11 +0000 (00:27 -0400)
committerAl Viro <viro@zeniv.linux.org.uk>
Thu, 12 Jun 2014 04:28:09 +0000 (00:28 -0400)
Backmerge of dcache.c changes from mainline.  It's that, or complete
rebase...

Conflicts:
fs/splice.c

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
599 files changed:
Documentation/ABI/testing/sysfs-bus-pci
Documentation/DocBook/drm.tmpl
Documentation/DocBook/media/Makefile
Documentation/debugging-via-ohci1394.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/clock/at91-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/dma/ti-edma.txt
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/email-clients.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/sysfs-interface
Documentation/input/elantech.txt
Documentation/java.txt
Documentation/kernel-parameters.txt
Documentation/networking/filter.txt
Documentation/networking/packet_mmap.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-375-db.dts
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/at91-sama5d3_xplained.dts
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9rl.dtsi
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/imx53-mba53.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
arch/arm/boot/dts/kirkwood-t5325.dts
arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap2420.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap3-cm-t3x30.dtsi
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020.dts
arch/arm/boot/dts/omap3-sb-t35.dtsi
arch/arm/boot/dts/omap3-sbc-t3517.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d3_mci2.dtsi
arch/arm/boot/dts/sama5d3_tcb1.dtsi
arch/arm/boot/dts/sama5d3_uart.dtsi
arch/arm/boot/dts/ste-ccu8540.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/common/bL_switcher.c
arch/arm/common/edma.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/sunxi_defconfig
arch/arm/include/asm/trusted_foundations.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/entry-header.S
arch/arm/kernel/unwind.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-exynos/firmware.c
arch/arm/mach-imx/devices/platform-ipu-core.c
arch/arm/mach-mvebu/mvebu-soc-id.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/cclock3xxx_data.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-orion5x/common.h
arch/arm/mm/proc-v7m.S
arch/arm/plat-omap/dma.c
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/irq.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/barrier.h
arch/metag/include/asm/processor.h
arch/metag/include/uapi/asm/Kbuild
arch/metag/include/uapi/asm/resource.h [deleted file]
arch/mips/dec/ecc-berr.c
arch/mips/dec/kn02xa-berr.c
arch/mips/dec/prom/Makefile
arch/mips/dec/prom/call_o32.S [deleted file]
arch/mips/fw/lib/call_o32.S
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/rm9k-ocd.h [deleted file]
arch/mips/include/asm/syscall.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/proc.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/lantiq/dts/easy50712.dts
arch/mips/lib/csum_partial.S
arch/mips/lib/delay.c
arch/mips/lib/strncpy_user.S
arch/mips/loongson/Kconfig
arch/mips/loongson/lemote-2f/clock.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlbex.c
arch/mips/ralink/dts/mt7620a_eval.dts
arch/mips/ralink/dts/rt2880_eval.dts
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883_eval.dts
arch/parisc/Kconfig
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/traps.c
arch/parisc/mm/fault.c
arch/powerpc/Makefile
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/sections.h
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/sysfs.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/tsb.c
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hugetlb.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/mcheck/threshold.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/rdrand.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/ldt.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smp.c
arch/x86/kernel/traps.c
arch/x86/kernel/vsmp_64.c
arch/x86/kernel/vsyscall_gtod.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/msr.c
arch/x86/math-emu/errors.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/early_printk.c
arch/x86/platform/olpc/olpc-xo1-pm.c
arch/x86/power/hibernate_64.c
arch/x86/vdso/vdso32-setup.c
arch/x86/xen/enlighten.c
arch/x86/xen/irq.c
block/blk-cgroup.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/tbutils.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/cm_sbs.c [new file with mode: 0644]
drivers/acpi/thermal.c
drivers/acpi/video.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/block/virtio_blk.c
drivers/bus/mvebu-mbus.c
drivers/char/random.c
drivers/char/tpm/tpm_ppi.c
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/bcm/clk-kona.c
drivers/clk/bcm/clk-kona.h
drivers/clk/clk-divider.c
drivers/clk/clk.c
drivers/clk/shmobile/clk-mstp.c
drivers/clk/socfpga/clk-pll.c
drivers/clk/socfpga/clk.c
drivers/clk/st/clkgen-pll.c
drivers/clk/tegra/clk-pll.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-marco.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/error.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/mv_xor.c
drivers/dma/sa11x0-dma.c
drivers/firewire/core.h
drivers/firewire/ohci.c
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_dma.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ucode.h
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv770_dma.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/hid-sensor-hub.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/Kconfig
drivers/hwmon/emc1403.c
drivers/hwmon/ntc_thermistor.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/keyboard/Kconfig
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/misc/bma150.c
drivers/input/mouse/Kconfig
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/input/mouse/synaptics.c
drivers/input/serio/ambakmi.c
drivers/input/touchscreen/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/i2c/ov7670.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/media-device.c
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpfe_capture.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/exynos4-is/fimc-core.c
drivers/media/tuners/fc2580.c
drivers/media/tuners/fc2580_priv.h
drivers/media/usb/dvb-usb-v2/Makefile
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/sonixb.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/memory/mvebu-devbus.c
drivers/mfd/rtsx_pcr.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mtd/nand/davinci_nand.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bonding.h
drivers/net/can/c_can/Kconfig
drivers/net/can/c_can/c_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/altera/Makefile
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/altera/altera_utils.c
drivers/net/ethernet/altera/altera_utils.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_mbim.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/base.c
drivers/pci/host/pci-mvebu.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/pci.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/pnp/pnpbios/bioscalls.c
drivers/ptp/Kconfig
drivers/rtc/rtc-hym8563.c
drivers/scsi/scsi_transport_sas.c
drivers/sh/Makefile
drivers/sh/pm_runtime.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-qup.c
drivers/spi/spi.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/imx-tve.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/staging/media/sn9c102/sn9c102_devtable.h
drivers/staging/rtl8723au/os_dep/os_intfs.c
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_device.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/xen/events/events_fifo.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/cifs/inode.c
fs/dcache.c
fs/exec.c
fs/kernfs/file.c
fs/locks.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4state.c
fs/ocfs2/dlm/dlmmaster.c
fs/splice.c
fs/sysfs/file.c
fs/sysfs/mount.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_da_btree.h
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_sb.c
fs/xfs/xfs_super.c
include/asm-generic/resource.h
include/drm/drm_pciids.h
include/drm/i915_pciids.h
include/dt-bindings/clock/at91.h [moved from include/dt-bindings/clk/at91.h with 100% similarity]
include/linux/amba/bus.h
include/linux/cgroup.h
include/linux/dmaengine.h
include/linux/if_macvlan.h
include/linux/if_vlan.h
include/linux/interrupt.h
include/linux/kernfs.h
include/linux/linkage.h
include/linux/mfd/rtsx_common.h
include/linux/mfd/rtsx_pci.h
include/linux/mlx4/qp.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of.h
include/linux/of_mdio.h
include/linux/omap-dma.h
include/linux/perf_event.h
include/linux/rtnetlink.h
include/linux/sched.h
include/net/cfg80211.h
include/net/ip6_route.h
include/net/netns/ipv4.h
include/trace/events/module.h
include/uapi/asm-generic/resource.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/audit.h
include/uapi/linux/nl80211.h
init/main.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/context_tracking.c
kernel/events/core.c
kernel/hrtimer.c
kernel/kexec.c
kernel/locking/lockdep.c
kernel/power/snapshot.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/softirq.c
kernel/tracepoint.c
kernel/workqueue.c
lib/dump_stack.c
mm/Kconfig
mm/filemap.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/mremap.c
mm/percpu.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/bridge/br_netfilter.c
net/ceph/messenger.c
net/ceph/osdmap.c
net/core/dev.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_protocol.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/tcpv6_offload.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_protocol.c
net/iucv/af_iucv.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/util.c
net/mac80211/vht.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink.c
net/rxrpc/ar-key.c
net/sched/cls_tcindex.c
net/wireless/scan.c
net/wireless/sme.c
scripts/checksyscalls.sh
security/device_cgroup.c
sound/core/pcm_dmaengine.c
sound/isa/sb/sb_mixer.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8962.h
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/imx-audmux.c
sound/soc/intel/sst-acpi.c
sound/soc/intel/sst-baytrail-dsp.c
sound/soc/intel/sst-baytrail-ipc.c
sound/soc/intel/sst-dsp-priv.h
sound/soc/intel/sst-dsp.c
sound/soc/intel/sst-dsp.h
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-dsp.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst-haswell-ipc.h
sound/soc/intel/sst-haswell-pcm.c
sound/soc/sh/rcar/core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/usb/card.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/pcm.c
sound/usb/usbaudio.h
tools/Makefile
tools/lib/lockdep/Makefile

index a3c5a6685036103e7ec677272acdc4620d0302f6..ab8d76dfaa8096bbdf42bacf31336b856c07959b 100644 (file)
@@ -117,7 +117,7 @@ Description:
 
 What:          /sys/bus/pci/devices/.../vpd
 Date:          February 2008
-Contact:       Ben Hutchings <bhutchings@solarflare.com>
+Contact:       Ben Hutchings <bwh@kernel.org>
 Description:
                A file named vpd in a device directory will be a
                binary file containing the Vital Product Data for the
index 677a02553ec0cd772c1a86c73b7d314f48376190..ba60d93c18551af17db0e1bcbca7670662c025f9 100644 (file)
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
       providing a solution to every graphics memory-related problems, GEM
       identified common code between drivers and created a support library to
       share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
       UMA devices.
     </para>
     <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
            to manage the PRIME file descriptors. Similar to the mode setting
            API PRIME is agnostic to the underlying buffer object manager, as
-           long as handles are 32bit unsinged integers.
+           long as handles are 32bit unsigned integers.
          </para>
          <para>
            While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
       first create properties and then create and associate individual instances
       of those properties to objects. A property can be instantiated multiple
       times and associated with different objects. Values are stored in property
-      instances, and all other property information are stored in the propery
+      instances, and all other property information are stored in the property
       and shared between all instances of the property.
     </para>
     <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
       is only used by old DRM drivers which have done a so-called shadow-attach
       to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
       submission code. Do not use any of this in new and modern drivers.
     </para>
 
index f9fd615427fbd4c0a718d66ed1b7de006ab796e6..1d27f0a1abd1e1872b0e05693ab35d6ecd64b0f2 100644 (file)
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-       $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+       $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
        $(Q)base64 -d $< >$@
index fa0151a712f9c5a0d9fe43db6e0746ff4046a5e0..5c9a567b3fac0a6907343608a550583268cd5450 100644 (file)
@@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more.
 With most FireWire controllers, memory access is limited to the low 4 GB
 of physical address space.  This can be a problem on IA64 machines where
 memory is located mostly above that limit, but it is rarely a problem on
-more common hardware such as x86, x86-64 and PowerPC.  However, at least
-Agere/LSI FW643e and FW643e2 controllers are known to support access to
-physical addresses above 4 GB.
+more common hardware such as x86, x86-64 and PowerPC.
+
+At least LSI FW643e and FW643e2 controllers are known to support access to
+physical addresses above 4 GB, but this feature is currently not enabled by
+Linux.
 
 Together with a early initialization of the OHCI-1394 controller for debugging,
 this facility proved most useful for examining long debugs logs in the printk
@@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
    compliant, they are based on TI PCILynx chips and require drivers for Win-
    dows operating systems.
 
-   The mentioned kernel log message contains ">4 GB phys DMA" in case of
-   OHCI-1394 controllers which support accesses above this limit.
+   The mentioned kernel log message contains the string "physUB" if the
+   controller implements a writable Physical Upper Bound register.  This is
+   required for physical DMA above 4 GB (but not utilized by Linux yet).
 
 2) Establish a working FireWire cable connection:
 
index 05a27e9442bd4e5c4d3e618a6a0594e55466b192..2f5173500bd953b32e55134012af50ac93e46ad8 100644 (file)
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
        If the pool runs out of data or metadata space, the pool will
        either queue or error the IO destined to the data device.  The
-       default is to queue the IO until more space is added.
+       default is to queue the IO until more space is added or the
+       'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+       module parameter can be used to change this timeout -- it
+       defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
index cd5e23912888cf4350750fe0adc908fb895e3d08..6794cdc96d8fdf44513a4ad2310fe67a2a93712d 100644 (file)
@@ -62,7 +62,7 @@ Required properties for PMC node:
 - interrupt-controller : tell that the PMC is an interrupt controller.
 - #interrupt-cells : must be set to 1. The first cell encodes the interrupt id,
        and reflect the bit position in the PMC_ER/DR/SR registers.
-       You can use the dt macros defined in dt-bindings/clk/at91.h.
+       You can use the dt macros defined in dt-bindings/clock/at91.h.
        0 (AT91_PMC_MOSCS) -> main oscillator ready
        1 (AT91_PMC_LOCKA) -> PLL A ready
        2 (AT91_PMC_LOCKB) -> PLL B ready
index 5992dceec7af7d1e9d1ac8f329b831d48e409d87..02a25d99ca61bfb33a46a2ce8e547b7f22d3c73a 100644 (file)
@@ -43,7 +43,7 @@ Example
                clock-output-names =
                        "tpu0", "mmcif1", "sdhi3", "sdhi2",
                         "sdhi1", "sdhi0", "mmcif0";
-               renesas,clock-indices = <
+               clock-indices = <
                        R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
                        R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
                        R8A7790_CLK_MMCIF0
index 9fbbdb783a72d50600755a074cbc11cfba97f713..68ff2137bae7261e84409a472fbf432989fca0d6 100644 (file)
@@ -29,6 +29,6 @@ edma: edma@49000000 {
        dma-channels = <64>;
        ti,edma-regions = <4>;
        ti,edma-slots = <256>;
-       ti,edma-xbar-event-map = <1 12
-                                 2 13>;
+       ti,edma-xbar-event-map = /bits/ 16 <1 12
+                                           2 13>;
 };
index c79bab025369af4bb6320ba0e90f7fb386942cc1..8dbcf8295c6c9ceaa4eb7518694acf3b09095dc1 100644 (file)
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
index e9f5daccbd02e2d8e97b489faf372dbb307529d8..4e30ebaa9e5b2652950b2383817e852d2e2239e0 100644 (file)
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
index 8b9cd8eb3f917e63074ba0ffae0e83b767a87176..264bcde0c51c55629066e9809af3bab99fe05c8c 100644 (file)
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
-interrupts serviced; each  subsequent column is the  total for that particular
-interrupt.
+interrupts serviced  including  unnumbered  architecture specific  interrupts;
+each  subsequent column is the  total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
index 79f8257dd790703f8a9285e7b498f585df87e97b..2cc95ad466047b055d45f748619cf2d7231d5e93 100644 (file)
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
                from the max value.
                RW
 
+temp[1-*]_min_hyst
+               Temperature hysteresis value for min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the min value.
+               RW
+
 temp[1-*]_input Temperature input value.
                Unit: millidegree Celsius
                RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit    Temperature critical min value, typically lower than
                Unit: millidegree Celsius
                RW
 
+temp[1-*]_lcrit_hyst
+               Temperature hysteresis value for critical min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the critical min value.
+               RW
+
 temp[1-*]_offset
                Temperature offset which is added to the temperature reading
                by the chip.
index 5602eb71ad5d7318e50952846d4f82edd3dcc239..e1ae127ed099d4934e1d7fb95f1a8ba819c1da9b 100644 (file)
@@ -504,9 +504,12 @@ byte 5:
 * reg_10
 
    bit   7   6   5   4   3   2   1   0
-         0   0   0   0   0   0   0   A
+         0   0   0   0   R   F   T   A
 
          A: 1 = enable absolute tracking
+         T: 1 = enable two finger mode auto correct
+         F: 1 = disable ABS Position Filter
+         R: 1 = enable real hardware resolution
 
 6.2 Native absolute mode 6 byte packet format
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
index e6a72328154783fc6e983904658eb8eb8c0c4ee8..418020584ccc171b8ff079e496e73383f0f55c29 100644 (file)
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
                break;
        case CP_CLASS:
        case CP_STRING:
+       case CP_METHODTYPE:
                seekerr = fseek(classfile, 2, SEEK_CUR);
                break;
+       case CP_METHODHANDLE:
+               seekerr = fseek(classfile, 3, SEEK_CUR);
+               break;
        case CP_INTEGER:
        case CP_FLOAT:
        case CP_FIELDREF:
        case CP_METHODREF:
        case CP_INTERFACEMETHODREF:
        case CP_NAMEANDTYPE:
+       case CP_INVOKEDYNAMIC:
                seekerr = fseek(classfile, 4, SEEK_CUR);
                break;
        case CP_LONG:
index 43842177b771d72e67e90361f79b28966435787f..30a8ad0dae535cf1670a77d7f4c869e5ffc4c6dc 100644 (file)
@@ -2218,10 +2218,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        noreplace-smp   [X86-32,SMP] Don't replace SMP instructions
                        with UP alternatives
 
-       nordrand        [X86] Disable the direct use of the RDRAND
-                       instruction even if it is supported by the
-                       processor.  RDRAND is still available to user
-                       space applications.
+       nordrand        [X86] Disable kernel use of the RDRAND and
+                       RDSEED instructions even if they are supported
+                       by the processor.  RDRAND and RDSEED are still
+                       available to user space applications.
 
        noresume        [SWSUSP] Disables resume and restores original swap
                        space.
index 81f940f4e88480d48c35fd7707d679d646ef0af8..e3ba753cb714949c4e26f898da31b6d0e6b03737 100644 (file)
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
index 6fea79efb4cbfd31cc1d0155aef320b94b89da9e..38112d512f47db9ec9a70edae7c7df83b7d13119 100644 (file)
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
index a9380ba54c8e984a997edb65fae8be9fef02f761..b4f53653c106d40d648930776e3258278c476932 100644 (file)
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
index 51ebb779c5f33ee1693e740066bfbd1c004fa819..8ccf31c95a15e92848a1d15740c5c85d8899b9f1 100644 (file)
@@ -537,7 +537,7 @@ L:  linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridgers2013@gmail.com
+M:     Vince Bridgers <vbridgers2013@gmail.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -1893,14 +1893,15 @@ L:      netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
-BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Christian Daudt <bcm@fixthebug.org>
 M:     Matt Porter <mporter@linaro.org>
 L:     bcm-kernel-feedback-list@broadcom.com
-T:     git git://git.github.com/broadcom/bcm11351
+T:     git git://github.com/broadcom/mach-bcm
 S:     Maintained
 F:     arch/arm/mach-bcm/
 F:     arch/arm/boot/dts/bcm113*
+F:     arch/arm/boot/dts/bcm216*
 F:     arch/arm/boot/dts/bcm281*
 F:     arch/arm/configs/bcm_defconfig
 F:     drivers/mmc/host/sdhci_bcm_kona.c
@@ -2245,12 +2246,6 @@ L:       linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/host/ohci-ep93xx.c
 
-CIRRUS LOGIC CS4270 SOUND DRIVER
-M:     Timur Tabi <timur@tabi.org>
-L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Odd Fixes
-F:     sound/soc/codecs/cs4270*
-
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:     Brian Austin <brian.austin@cirrus.com>
 M:     Paul Handrigan <Paul.Handrigan@cirrus.com>
@@ -4818,6 +4813,14 @@ L:       linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
+
+IRQCHIP DRIVERS
+M:     Thomas Gleixner <tglx@linutronix.de>
+M:     Jason Cooper <jason@lakedaemon.net>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+T:     git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:     drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@ -5490,15 +5493,15 @@ F:      Documentation/hwmon/ltc4261
 F:     drivers/hwmon/ltc4261.c
 
 LTP (Linux Test Project)
-M:     Shubham Goyal <shubham@linux.vnet.ibm.com>
 M:     Mike Frysinger <vapier@gentoo.org>
 M:     Cyril Hrubis <chrubis@suse.cz>
-M:     Caspar Zhang <caspar@casparzhang.com>
 M:     Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M:     Jan Stancek <jstancek@redhat.com>
+M:     Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+M:     Alexey Kodanev <alexey.kodanev@oracle.com>
 L:     ltp-list@lists.sourceforge.net (subscribers-only)
-W:     http://ltp.sourceforge.net/
+W:     http://linux-test-project.github.io/
 T:     git git://github.com/linux-test-project/ltp.git
-T:     git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
 S:     Maintained
 
 M32R ARCHITECTURE
@@ -6511,10 +6514,10 @@ T:      git git://openrisc.net/~jonas/linux
 F:     arch/openrisc/
 
 OPENVSWITCH
-M:     Jesse Gross <jesse@nicira.com>
+M:     Pravin Shelar <pshelar@nicira.com>
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
 
@@ -7402,6 +7405,14 @@ F:       drivers/rpmsg/
 F:     Documentation/rpmsg.txt
 F:     include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M:     Philipp Zabel <p.zabel@pengutronix.de>
+S:     Maintained
+F:     drivers/reset/
+F:     Documentation/devicetree/bindings/reset/
+F:     include/linux/reset.h
+F:     include/linux/reset-controller.h
+
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
@@ -9107,6 +9118,9 @@ F:        arch/um/os-Linux/drivers/
 
 TURBOCHANNEL SUBSYSTEM
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
+M:     Ralf Baechle <ralf@linux-mips.org>
+L:     linux-mips@linux-mips.org
+Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
 S:     Maintained
 F:     drivers/tc/
 F:     include/linux/tc.h
index 28a7259e0f3b53ed0ee9242fcea45abc71c004f1..cf3412d78ff139c99458bf923872e7a57440853e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index cb6811e5ae5a9a148321d4e667dc34cde9bc9846..7ad75b4e066358f5229d93d0d017c754a711ca32 100644 (file)
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                        reg =   <0x49000000 0x10000>,
-                               <0x44e10f90 0x10>;
+                               <0x44e10f90 0x40>;
                        interrupts = <12 13 14>;
                        #dma-cells = <1>;
                        dma-channels = <64>;
index 788391f916844130d8d6cdb2722479163863a707..5a452fdd7c5d9711cec9f6d0196f90b0726fefc3 100644 (file)
        };
 };
 
+&iva {
+       status = "disabled";
+};
+
+&mailbox {
+       status = "disabled";
+};
+
+&mmu_isp {
+       status = "disabled";
+};
+
+&smartreflex_mpu_iva {
+       status = "disabled";
+};
+
 /include/ "am35xx-clocks.dtsi"
 /include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
index df8798e8bd255bbf789da0b44f5229b9f3dc80dd..a055f7f0f14ae5a3e080489c9c424bd37be0e1c0 100644 (file)
        status = "okay";
 };
 
+&gpio5 {
+       status = "okay";
+       ti,no-reset-on-init;
+};
+
 &mmc1 {
        status = "okay";
        vmmc-supply = <&vmmcsd_fixed>;
index 82f238a9063ffe47d10dbe083f136e1876f8efd2..3383c4b668035737e1812777fed34a03f5f9ef7c 100644 (file)
@@ -67,6 +67,7 @@
                        i2c@11000 {
                                pinctrl-0 = <&i2c0_pins>;
                                pinctrl-names = "default";
+                               clock-frequency = <100000>;
                                status = "okay";
                                audio_codec: audio-codec@4a {
                                        compatible = "cirrus,cs42l51";
index 9378d3136b41d7b37f11abdf01186c019758f7f1..0451124e8ebf49af45b34072ae69a02a6fa133a9 100644 (file)
                                };
                        };
 
+                       sata@a0000 {
+                               status = "okay";
+                               nr-ports = <2>;
+                       };
+
                        nand: nand@d0000 {
                                pinctrl-0 = <&nand_pins>;
                                pinctrl-names = "default";
index 068031f0f263ef081f590eae7ac3da0df629ab2d..6d0f03c98ee919adb2609ad16ee7763fee3c65bc 100644 (file)
@@ -99,7 +99,7 @@
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index e2919f02e1d47687c77477b1a9bd389a32a4134e..da801964a25783a5f11d1aa22afe1cd1e2cc9da9 100644 (file)
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
                        pcie@4,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x2000 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index 448373c4b0e534c1d2ce08592981f5d38bad39a8..90f0bf6f92715c7335072bda5b508e7800fa89a4 100644 (file)
@@ -49,7 +49,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index 61bda687f782f65485f958adb8d8ec2822ebde35..0c756421ae6aa5f504b72898c16875bdaab5f8d3 100644 (file)
@@ -59,7 +59,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
                        ethernet@70000 {
                                status = "okay";
                                phy = <&phy0>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@74000 {
                                status = "okay";
                                phy = <&phy1>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@30000 {
                                status = "okay";
                                phy = <&phy2>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@34000 {
                                status = "okay";
                                phy = <&phy3>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
 
                        /* Front-side USB slot */
index 985948ce67b3271a65c9b4a413a99e7d255b12e7..5d42feb3104983a2ee9b21be33b59cc2e66a1bcd 100644 (file)
@@ -39,7 +39,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index ce1375595e5f27117c37881bcafa5fc5805b1455..4537259ce5299baf8cf68978d7c2d106c5f67c32 100644 (file)
@@ -34,7 +34,7 @@
                        };
 
                        spi0: spi@f0004000 {
-                               cs-gpios = <&pioD 13 0>;
+                               cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
                                status = "okay";
                        };
 
@@ -79,7 +79,7 @@
                        };
 
                        spi1: spi@f8008000 {
-                               cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+                               cs-gpios = <&pioC 25 0>;
                                status = "okay";
                        };
 
index 366fc2cbcd64c3f56495d104e5a744315c7c4345..c0e0eae16a279f65dfc979d5d10735ca5b097c38 100644 (file)
                                trigger@3 {
                                        reg = <3>;
                                        trigger-name = "external";
-                                       trigger-value = <0x13>;
+                                       trigger-value = <0xd>;
                                        trigger-external;
                                };
                        };
index e21dda0e8986574b2531c1675eca1b4de164822d..3be973e9889a2b0ef29cc45657ad6eeb056e7f5f 100644 (file)
@@ -10,7 +10,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel AT91SAM9261 family SoC";
index 63e1784d272c556974023fcfd7d46ba7a0510e3d..92a52faebef77cd8eda572d11504c0f83362a428 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "skeleton.dtsi"
 #include <dt-bindings/pinctrl/at91.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
 
index 9583563dd0ef7a3f0d4cd89765a20bb243b10117..8a558b7ac99980b0c3ec2c50dc791341f70028c8 100644 (file)
                status = "okay";
 
                ak8975@0c {
-                       compatible = "ak,ak8975";
+                       compatible = "asahi-kasei,ak8975";
                        reg = <0x0c>;
                        gpios = <&gpj0 7 0>;
                };
index 090f9830b129b72259e4976be890917182b8a6f5..cde19c818667a50170bbd8562a3a943f05df619f 100644 (file)
                                        regulator-name = "VDD_IOPERI_1.8V";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
+                                       regulator-always-on;
                                        op_mode = <1>;
                                };
 
index 80a3bf4c59865e0403a4197abb5ae6f0bc00d363..896a2a6619e0a72aed8c0f474c721e8294a38a3d 100644 (file)
                        gpio-key,wakeup;
                };
        };
-
-       amba {
-               mdma1: mdma@11C10000 {
-                       /*
-                        * MDMA1 can support both secure and non-secure
-                        * AXI transactions. When this is enabled in the kernel
-                        * for boards that run in secure mode, we are getting
-                        * imprecise external aborts causing the kernel to oops.
-                        */
-                       status = "disabled";
-               };
-       };
 };
index c3a9a66c57678f9a5dddd75e11e55be913e3c3c0..b69fbcb7dcb8accc3fc2f7abd713dc754b9fd5ad 100644 (file)
                reg = <0x100440C0 0x20>;
        };
 
-       mau_pd: power-domain@100440E0 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x100440E0 0x20>;
-       };
-
-       g2d_pd: power-domain@10044100 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x10044100 0x20>;
-       };
-
        msc_pd: power-domain@10044120 {
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044120 0x20>;
                        #dma-cells = <1>;
                        #dma-channels = <8>;
                        #dma-requests = <1>;
+                       /*
+                        * MDMA1 can support both secure and non-secure
+                        * AXI transactions. When this is enabled in the kernel
+                        * for boards that run in secure mode, we are getting
+                        * imprecise external aborts causing the kernel to oops.
+                        */
+                       status = "disabled";
                };
        };
 
        spi_0: spi@12d20000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d20000 0x100>;
-               interrupts = <0 66 0>;
+               interrupts = <0 68 0>;
                dmas = <&pdma0 5
                        &pdma0 4>;
                dma-names = "tx", "rx";
        spi_1: spi@12d30000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d30000 0x100>;
-               interrupts = <0 67 0>;
+               interrupts = <0 69 0>;
                dmas = <&pdma1 5
                        &pdma1 4>;
                dma-names = "tx", "rx";
        spi_2: spi@12d40000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d40000 0x100>;
-               interrupts = <0 68 0>;
+               interrupts = <0 70 0>;
                dmas = <&pdma0 7
                        &pdma0 6>;
                dma-names = "tx", "rx";
                interrupts = <0 112 0>;
                clocks = <&clock 471>;
                clock-names = "secss";
-               samsung,power-domain = <&g2d_pd>;
        };
 };
index 7c8c129698929151512f282c5baad14c69f51309..a3431d7848709aac0463a27e4471c22dd17a4a46 100644 (file)
 &tve {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_vga_sync_1>;
-       i2c-ddc-bus = <&i2c3>;
+       ddc-i2c-bus = <&i2c3>;
        fsl,tve-mode = "vga";
        fsl,hsync-pin = <4>;
        fsl,vsync-pin = <6>;
index 9c2bff2252d0d078514348ab2bc92aad7ea5a6fb..6a1bf4ff83d5516dac0016ebc2bdd21d05565631 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,imx53-ipu";
-                       reg = <0x18000000 0x080000000>;
+                       reg = <0x18000000 0x08000000>;
                        interrupts = <11 10>;
                        clocks = <&clks IMX5_CLK_IPU_GATE>,
                                 <&clks IMX5_CLK_IPU_DI0_GATE>,
index 32c6fb4a11624c05756e4e2fe24b05c62c799667..b939f4f52d16a7c0edb677a86558bacd94fec831 100644 (file)
                bootargs = "console=ttyS0,115200n8 earlyprintk";
        };
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+        };
+
        ocp@f1000000 {
                pinctrl@10000 {
                        pmx_usb_led: pmx-usb-led {
                ehci@50000 {
                        status = "okay";
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio-leds {
index aa78c2d11fe738fc843f716bd1a6f98ff1054a5b..e2cc85cc3b87e805a113489d3d95cf912d682224 100644 (file)
@@ -4,6 +4,16 @@
 / {
        model = "ZyXEL NSA310";
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
        ocp@f1000000 {
                pinctrl: pinctrl@10000 {
 
                        status = "okay";
                        nr-ports = <2>;
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio_poweroff {
index 7d1c7677a18f18c3fb1cc00463420545a5b6cb78..0bd70d928c69ba7f7798d1492f6ce3844af9d6ad 100644 (file)
 
                i2c@11000 {
                        status = "okay";
-
-                       alc5621: alc5621@1a {
-                               compatible = "realtek,alc5621";
-                               reg = <0x1a>;
-                       };
                };
 
                serial@12000 {
index f577b7df9a29e4f5f4e74ca86aef4b4ba61ceb87..521c587acaee9f679ab6f9200c5f8be8eee240e9 100644 (file)
                compatible = "smsc,lan9221", "smsc,lan9115";
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
                vddvario-supply = <&vddvario>;
index 22f35ea142c199082afdd8ba626bd7c5e0b6cd54..8f8c07da4ac148d550ae45cd3501d4a992f4e7c8 100644 (file)
                        interrupts = <58>;
                };
 
-               mailbox: mailbox@48094000 {
-                       compatible = "ti,omap2-mailbox";
-                       ti,hwmods = "mailbox";
-                       reg = <0x48094000 0x200>;
-                       interrupts = <26>;
-               };
-
                intc: interrupt-controller@1 {
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
index 85b1fb014c4314efe82eca9fcb7dfa7e482cb8d2..2d9979835f241f2153cd3d0b52c5c8b0b6c49559 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>, <34>;
+                       interrupt-names = "dsp", "iva";
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@48028000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x48028000 0x400>;
index d09697dab55e80063a737361c65138822bbdf828..42d2c61c9e2d7dc1851054a45ec305820cc8a9f4 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@49018000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x49018000 0x400>;
index d00055809e31d79b9d1730c06efe02c7e2e6f747..25ba08331d8852e6701c96a4b9e8e54def19f0d8 100644 (file)
                        cpu0-supply = <&vcc>;
                };
        };
-
-       vddvario: regulator-vddvario {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a: regulator-vdd33a {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
 };
 
 &omap3_pmx_core {
 
        hsusb0_pins: pinmux_hsusb0_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
-                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
-                       OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
-                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
-                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
-                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
-                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
-                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
-                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
-                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
-                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
-                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
+                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
+                       OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
+                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
+                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
+                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
+                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
+                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
+                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
+                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
+                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
+                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
+                       OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
                >;
        };
 };
 
+#include "omap-gpmc-smsc911x.dtsi"
+
 &gpmc {
        ranges = <5 0 0x2c000000 0x01000000>;
 
-       smsc1: ethernet@5,0 {
+       smsc1: ethernet@gpmc {
                compatible = "smsc,lan9221", "smsc,lan9115";
                pinctrl-names = "default";
                pinctrl-0 = <&smsc1_pins>;
                interrupt-parent = <&gpio6>;
                interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
                reg = <5 0 0xff>;
-               bank-width = <2>;
-               gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
-               gpmc,adv-wr-off-ns = <48>;
-               gpmc,oe-on-ns = <54>;
-               gpmc,oe-off-ns = <168>;
-               gpmc,we-on-ns = <54>;
-               gpmc,we-off-ns = <168>;
-               gpmc,rd-cycle-ns = <186>;
-               gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
-               gpmc,cycle2cycle-samecsen;
-               gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario>;
-               vdd33a-supply = <&vdd33a>;
-               reg-io-width = <4>;
-               smsc,save-mac-address;
        };
 };
 
index b97736d98a6427f087c11bd510909b1007f8c152..e2d163bf061975bff9ac5a6e83ecf6bf4ef98ed3 100644 (file)
                >;
        };
 
-       smsc911x_pins: pinmux_smsc911x_pins {
+       smsc9221_pins: pinmux_smsc9221_pins {
                pinctrl-single,pins = <
                        0x1a2 (PIN_INPUT | MUX_MODE4)           /* mcspi1_cs2.gpio_176 */
                >;
index 7abd64f6ae21465c9ac74b22563f8f828a5d684b..b22caaaf774ba710461fcabf395c121c0ccc0482 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include "omap3-igep.dtsi"
-#include "omap-gpmc-smsc911x.dtsi"
+#include "omap-gpmc-smsc9221.dtsi"
 
 / {
        model = "IGEPv2 (TI OMAP AM/DM37x)";
 
        ethernet@gpmc {
                pinctrl-names = "default";
-               pinctrl-0 = <&smsc911x_pins>;
+               pinctrl-0 = <&smsc9221_pins>;
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
                interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
index 7909c51b05a5643563b4ed74405066e1a222a995..d59e3de1441e2f7dc5e0fb9b4bc29f49ee04cbc1 100644 (file)
@@ -2,20 +2,6 @@
  * Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730
  */
 
-/ {
-       vddvario_sb_t35: regulator-vddvario-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a_sb_t35: regulator-vdd33a-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
-};
-
 &omap3_pmx_core {
        smsc2_pins: pinmux_smsc2_pins {
                pinctrl-single,pins = <
                reg = <4 0 0xff>;
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario_sb_t35>;
-               vdd33a-supply = <&vdd33a_sb_t35>;
+               vddvario-supply = <&vddvario>;
+               vdd33a-supply = <&vdd33a>;
                reg-io-width = <4>;
                smsc,save-mac-address;
        };
index 024c9c6c682d7eb4b421a559c7c4b5f95e0511f9..42189b65d393d29d2cf467ddbc5df4eb0661a471 100644 (file)
@@ -8,6 +8,19 @@
 / {
        model = "CompuLab SBC-T3517 with CM-T3517";
        compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
+
+       /* Only one GPMC smsc9220 on SBC-T3517, CM-T3517 uses am35x Ethernet */
+       vddvario: regulator-vddvario-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddvario";
+               regulator-always-on;
+       };
+
+       vdd33a: regulator-vdd33a-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vdd33a";
+               regulator-always-on;
+       };
 };
 
 &omap3_pmx_core {
index acb9019dc437b66321ec7995dc16456678a2e42c..4231191ade06acf8c7a02938a0fcfcbdeb318fbc 100644 (file)
@@ -61,7 +61,7 @@
                        ti,hwmods = "mpu";
                };
 
-               iva {
+               iva: iva {
                        compatible = "ti,iva2.2";
                        ti,hwmods = "iva";
 
index f8c9855ce587c15790f79a2f04e0ff02933f463f..36b4312a5e0d82fb20fb6a7eb9b1d59942b0ec18 100644 (file)
                        status = "disabled";
                };
 
+               mailbox: mailbox@4a0f4000 {
+                       compatible = "ti,omap4-mailbox";
+                       reg = <0x4a0f4000 0x200>;
+                       interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@4ae18000 {
                        compatible = "ti,omap5430-timer";
                        reg = <0x4ae18000 0x80>;
index eabcfdbb403acc7ff40b409617a53c9831414c04..a106b0872910da874f1000f417a1aab1793237de 100644 (file)
@@ -13,7 +13,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel SAMA5D3 family SoC";
index b029fe7ef17a657946de4d2fe71168b4b03210d8..1b02208ea6ff2b70aab43ddd416b37ef0b9a2a1f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        ahb {
index 382b04431f66b621e01a9f2fe7a9e488ac4c171b..02848453ca0cf5447de27aaca6b233611173da4e 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index a9fa75e4165205f9a1259f9a0821519faee54070..7a8d4c6115f72fdab533980a0f0d96eb6cb1469a 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index 7f3baf51a3a9e933d3a47dce18d2ca102c104a9b..32dd55e5f4e6b8567a8a2d207b69311959c9f9a1 100644 (file)
@@ -18,6 +18,7 @@
        compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
        };
 
index 32efc105df834de3c3143938bf0cfdebbf5d1b55..aba1c8a3f3883320a50b31dcac05341568e0a335 100644 (file)
@@ -87,7 +87,7 @@
 
                pll4: clk@01c20018 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-pll1-clk";
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
                        reg = <0x01c20018 0x4>;
                        clocks = <&osc24M>;
                        clock-output-names = "pll4";
                        clock-output-names = "pll6_sata", "pll6_other", "pll6";
                };
 
+               pll8: clk@01c20040 {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
+                       reg = <0x01c20040 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll8";
+               };
+
                cpu: cpu@01c20054 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-cpu-clk";
                        status = "disabled";
                };
 
-               i2c4: i2c@01c2bc00 {
+               i2c4: i2c@01c2c000 {
                        compatible = "allwinner,sun4i-i2c";
-                       reg = <0x01c2bc00 0x400>;
+                       reg = <0x01c2c000 0x400>;
                        interrupts = <0 89 4>;
                        clocks = <&apb1_gates 15>;
                        clock-frequency = <100000>;
index f01c0ee0c87ebd94debc320f5714bf247dfe5ab7..490f3dced749956d6f63f94ba74cebc6f55a4964 100644 (file)
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
 {
        int i;
 
-       for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-               cpu_up(i);
+       for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+               struct device *cpu_dev = get_cpu_device(i);
+               int ret = device_online(cpu_dev);
+               if (ret)
+                       dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+       }
 }
 
 static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
                        continue;
                }
 
-               ret = cpu_down(i);
+               ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
index 41bca32409fce81358c3b5c35bc081bcc28e7c76..5339009b3c0ce648df92244b5d9274e78273290b 100644 (file)
@@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event);
 
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
 
-static int edma_of_read_u32_to_s16_array(const struct device_node *np,
-                                        const char *propname, s16 *out_values,
-                                        size_t sz)
+static int edma_xbar_event_map(struct device *dev, struct device_node *node,
+                              struct edma_soc_info *pdata, size_t sz)
 {
-       int ret;
-
-       ret = of_property_read_u16_array(np, propname, out_values, sz);
-       if (ret)
-               return ret;
-
-       /* Terminate it */
-       *out_values++ = -1;
-       *out_values++ = -1;
-
-       return 0;
-}
-
-static int edma_xbar_event_map(struct device *dev,
-                              struct device_node *node,
-                              struct edma_soc_info *pdata, int len)
-{
-       int ret, i;
+       const char pname[] = "ti,edma-xbar-event-map";
        struct resource res;
        void __iomem *xbar;
-       const s16 (*xbar_chans)[2];
+       s16 (*xbar_chans)[2];
+       size_t nelm = sz / sizeof(s16);
        u32 shift, offset, mux;
+       int ret, i;
 
-       xbar_chans = devm_kzalloc(dev,
-                                 len/sizeof(s16) + 2*sizeof(s16),
-                                 GFP_KERNEL);
+       xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
        if (!xbar_chans)
                return -ENOMEM;
 
        ret = of_address_to_resource(node, 1, &res);
        if (ret)
-               return -EIO;
+               return -ENOMEM;
 
        xbar = devm_ioremap(dev, res.start, resource_size(&res));
        if (!xbar)
                return -ENOMEM;
 
-       ret = edma_of_read_u32_to_s16_array(node,
-                                           "ti,edma-xbar-event-map",
-                                           (s16 *)xbar_chans,
-                                           len/sizeof(u32));
+       ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
        if (ret)
                return -EIO;
 
-       for (i = 0; xbar_chans[i][0] != -1; i++) {
+       /* Invalidate last entry for the other user of this mess */
+       nelm >>= 1;
+       xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
+
+       for (i = 0; i < nelm; i++) {
                shift = (xbar_chans[i][1] & 0x03) << 3;
                offset = xbar_chans[i][1] & 0xfffffffc;
                mux = readl(xbar + offset);
@@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev,
                writel(mux, (xbar + offset));
        }
 
-       pdata->xbar_chans = xbar_chans;
-
+       pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
        return 0;
 }
 
index 4ce7b70ea9011634de2bcb9f1b0ff4f29e046ecd..e07a227ec0dbb331bae2428427dc36fec444adec 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
index b5df4a511b0acdfea6df30b7dd4b9753d2b00f9a..81ba78eaf54adb02840dbc2d41cc4af8acc7c08a 100644 (file)
@@ -37,7 +37,7 @@ CONFIG_SUN4I_EMAC=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_STMMAC_ETH=y
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_WLAN is not set
 CONFIG_SERIAL_8250=y
index b5f7705abcb024bb21c41ce34c6f7e95ddfb9bb8..624e1d436c6ce8d387bef77963c1315bc702a938 100644 (file)
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
         */
        pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
        pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
        setup_max_cpus = 0;
+#endif
        cpu_idle_poll_ctrl(true);
 }
 
index 12c3a5decc609d882626ec75bb2e82ebcbb74030..75d95799b6e6df7238dd425dc66da51ddb006633 100644 (file)
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)                                                  \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
+               const typeof(*(p)) __user *__tmp_p = (p);               \
                register const typeof(*(p)) __r2 asm("r2") = (x);       \
-               register const typeof(*(p)) __user *__p asm("r0") = (p);\
+               register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
index cf4f3e867395ef0261b16a4d721d8cff740c1de2..ded062f9b358038c05fa074816706c40619e6408 100644 (file)
@@ -77,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 }
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)     (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
 #define virt_to_mfn(v)         (pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
index 1420725142cab1817d26bc3ff7003ae4a4833732..efb208de75ec55067882afb0b2be768b78637a54 100644 (file)
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN
 
+       @ ensure bit 0 is cleared in the PC, otherwise behaviour is
+       @ unpredictable
+       bic     r4, #1
+
        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
index 3c217694ebecb126b23f226688bcc6874b0c8c7b..cb791ac6a0037dfd392622eb5adf24467bcedc37 100644 (file)
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
                if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
 
-       if (insn & 0x80)
+       if (insn & 0x8)
                if (unwind_pop_register(ctrl, &vsp, 14))
                                return -URC_FAILURE;
 
index a0282928e9c10bdbc67b385423b225f8e5315756..7cd6f19945ed737198f50f04db777348e9e7a3b8 100644 (file)
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
        [0] = {
                .name = "timer-counter-0",
-               .value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+               .value = 0x1,
        },
        [1] = {
                .name = "timer-counter-1",
-               .value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+               .value = 0x3,
        },
        [2] = {
                .name = "timer-counter-2",
-               .value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+               .value = 0x5,
        },
        [3] = {
                .name = "external",
-               .value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+               .value = 0xd,
                .is_external = true,
        },
 };
index 932129ef26c66054a2bdeecaef9c0af1a9bba788..aa01c4222b40334db58a603cbcbee82f5f671d6b 100644 (file)
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+       /*
+        * The second parameter of SMC_CMD_CPU1BOOT command means CPU id.
+        * But, Exynos4212 has only one secondary CPU so second parameter
+        * isn't used for informing secure firmware about CPU id.
+        */
+       if (soc_is_exynos4212())
+               cpu = 0;
+
        exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
        return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+       if (!soc_is_exynos4212())
+               boot_reg += 4*cpu;
 
        __raw_writel(boot_addr, boot_reg);
        return 0;
index fc4dd7cedc1189019dea590e8a4caa6b0ebf6884..6bd7c3f37ac08e139118e48f200a075cb9fbac8b 100644 (file)
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
        pdev = platform_device_alloc("mx3-camera", 0);
        if (!pdev)
-               goto err;
+               return ERR_PTR(-ENOMEM);
 
        pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
        if (!pdev->dev.dma_mask)
index f3d4cf53f7466ba6c44f5ef1d91484e3ce8f62e6..09520e19b78e46d441e288001d923448b2cd0995 100644 (file)
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
        iounmap(pci_base);
 
 res_ioremap:
-       clk_disable_unprepare(clk);
+       /*
+        * If the PCIe unit is actually enabled and we have PCI
+        * support in the kernel, we intentionally do not release the
+        * reference to the clock. We want to keep it running since
+        * the bootloader does some PCIe link configuration that the
+        * kernel is for now unable to do, and gating the clock would
+        * make us loose this precious configuration.
+        */
+       if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+               clk_disable_unprepare(clk);
+               clk_put(clk);
+       }
 
 clk_err:
        of_node_put(child);
index ac82512b9c8c641e044596f737cbe3a518eefe74..b6885e42c0a0749279cdac1babd1e0825f23742a 100644 (file)
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
        board_nand_data.nr_parts        = nr_parts;
        board_nand_data.devsize         = nand_type;
 
-       board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+       board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
        gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
index 8f5121b89688396d1b3fb4551dc55fc0c4395c00..eb8c75ec3b1ac28ad121969fdbbd586bfca69b20 100644 (file)
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
        .clkdm_name     = "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
        .name           = "dpll4_m5x2_ck",
index 01fc710c81818e9d48e2b26ca8742b77b392ff1c..2498ab025fa296416c03494df6070daf456451ee 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
        struct idle_statedata *cx = state_ptr + index;
        u32 mpuss_can_lose_context = 0;
+       int cpu_id = smp_processor_id();
 
        /*
         * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                                 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        if (dev->cpu == 0 && mpuss_can_lose_context)
                cpu_cluster_pm_exit();
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
        cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
        return index;
 }
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+       int cpu = smp_processor_id();
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C2",
                        .desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                        .exit_latency = 460 + 518,
                        .target_residency = 1100,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C3",
                        .desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;
 
+       /* Configure the broadcast timer on each cpu */
+       on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
        return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
index 75e92952c18efe3e597791e6798c2ee2ce5969a1..40c5d5f1451cd34b19ebed5e2d58f150f6f4a29d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Secondary CPU startup routine source file.
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009-2014 Texas Instruments, Inc.
  *
  * Author:
  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
  * code.  This routine also provides a holding flag into which
  * secondary core is held until we're ready for it to initialise.
  * The primary core will update this flag using a hardware
-+ * register AuxCoreBoot0.
+ * register AuxCoreBoot0.
  */
 ENTRY(omap5_secondary_startup)
+.arm
+THUMB( adr     r9, BSYM(wait)  )       @ CPU may be entered in ARM mode.
+THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
+THUMB( .thumb                  )       @ switch to Thumb now.
 wait:  ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        ldr     r0, [r2]
        mov     r0, r0, lsr #5
index 892317294fdc0812965910a05f1d895d4c2a03cd..e829664e6a6ca24d4c76ebac35e9edfe8e87780c 100644 (file)
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
         * current exception.
         */
 
-       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
        .main_clk       = "pad_clks_ck",
        .prcm = {
                .omap4 = {
index f565f9944af2ee45b795b180391853ffa8810a12..7548db2bfb8a7e595d7672c5cfbba3868651d689 100644 (file)
@@ -21,7 +21,7 @@ struct mv_sata_platform_data;
 #define ORION_MBUS_DEVBUS_BOOT_ATTR   0x0f
 #define ORION_MBUS_DEVBUS_TARGET(cs)  0x01
 #define ORION_MBUS_DEVBUS_ATTR(cs)    (~(1 << cs))
-#define ORION_MBUS_SRAM_TARGET        0x00
+#define ORION_MBUS_SRAM_TARGET        0x09
 #define ORION_MBUS_SRAM_ATTR          0x00
 
 /*
index 0c93588fcb91b4a74791534e5db93be9c578e5ce..1ca37c72f12fb88934257db8356853b02c350f5f 100644 (file)
@@ -123,6 +123,11 @@ __v7m_setup:
        mov     pc, lr
 ENDPROC(__v7m_setup)
 
+       .align 2
+__v7m_setup_stack:
+       .space  4 * 8                           @ 8 registers
+__v7m_setup_stack_top:
+
        define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
        .section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
        .long   nop_cache_fns           @ proc_info_list.cache
        .size   __v7m_proc_info, . - __v7m_proc_info
 
-__v7m_setup_stack:
-       .space  4 * 8                           @ 8 registers
-__v7m_setup_stack_top:
index 5f5b975887fc8424723d6566d687c82cf54328bc..b5608b1f9fbdf5da32bf354643c1c842a755df83 100644 (file)
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
        u32 dma_irqenable_l0;
+       u32 dma_irqenable_l1;
        u32 dma_ocp_sysconfig;
        u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
        omap_dma_global_context.dma_irqenable_l0 =
                p->dma_read(IRQENABLE_L0, 0);
+       omap_dma_global_context.dma_irqenable_l1 =
+               p->dma_read(IRQENABLE_L1, 0);
        omap_dma_global_context.dma_ocp_sysconfig =
                p->dma_read(OCP_SYSCONFIG, 0);
        omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
                OCP_SYSCONFIG, 0);
        p->dma_write(omap_dma_global_context.dma_irqenable_l0,
                IRQENABLE_L0, 0);
+       p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+               IRQENABLE_L1, 0);
 
        if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
                p->dma_write(0x3 , IRQSTATUS_L0, 0);
index e94f9458aa6faa3630d5d2b7cebf9e522b56901d..993bce527b8552d379c62b6082703b1438e80436 100644 (file)
@@ -138,6 +138,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys(x))
 
 /*
  *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
index 90c811f05a2e3279a8709211a770725141491dd7..7b1c67a0b485b6ca7fa55e661e898b1938f4de26 100644 (file)
@@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-#define set_pmd_at(mm, addr, pmdp, pmd)        set_pmd(pmdp, pmd)
+#define set_pmd_at(mm, addr, pmdp, pmd)        set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
 static inline int has_transparent_hugepage(void)
 {
index 473e5dbf8f39a39e8eaa7a0740e54ee4d6bacb59..0f08dfd69ebc73ea99b7b7f6d68c4b2f320eb2d1 100644 (file)
@@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
                ret = true;
-       }
 
+       /*
+        * when using forced irq_set_affinity we must ensure that the cpu
+        * being offlined is not present in the affinity mask, it may be
+        * selected as the target CPU otherwise
+        */
+       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
index 5e9aec358306f0c13bdd0bb70758dde88be9e961..31eb959e9aa81d05f16269e3bb500a965cb7cd48 100644 (file)
@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
+#ifndef __PAGETABLE_PMD_FOLDED
        return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+       return 0;
+#endif
 }
 
 int pmd_huge_support(void)
index ae763d8bf55acff94a8c611893e2a103f3156f33..fb13dc5e8f8c7c34079664761f6fab1bf802a1c7 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    314 /* length of syscall table */
+#define NR_syscalls                    315 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 715e85f858de5ea34e7f581b38bc5d9a60ec1304..7de0a2d65da42a09b6d8b34f75f175c326b329e9 100644 (file)
 #define __NR_finit_module              1335
 #define __NR_sched_setattr             1336
 #define __NR_sched_getattr             1337
+#define __NR_renameat2                 1338
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index fa8d61a312a7ee818a300522d9f029c9534d78f6..ba3d03503e84fde7f714fda777acf254356b78e8 100644 (file)
@@ -1775,6 +1775,7 @@ sys_call_table:
        data8 sys_finit_module                  // 1335
        data8 sys_sched_setattr
        data8 sys_sched_getattr
+       data8 sys_renameat2
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 9d38b73989eb597d677acd95ea53cf0ddb99b624..33afa56ad47aecd6620bc150eea042705cc85d3e 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            351
+#define NR_syscalls            352
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b932dd470041c2c5130dfcc44448c80033525259..9cd82fbc7817f716d589368bda3c1d274687a607 100644 (file)
 #define __NR_finit_module      348
 #define __NR_sched_setattr     349
 #define __NR_sched_getattr     350
+#define __NR_renameat2         351
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index b6223dc41d82870953be64b35fc409c4b5634070..501e102127899c6afaa893bd99d4d555f5bab8ec 100644 (file)
@@ -371,4 +371,5 @@ ENTRY(sys_call_table)
        .long sys_finit_module
        .long sys_sched_setattr
        .long sys_sched_getattr         /* 350 */
+       .long sys_renameat2
 
index 5d6b4b407ddab29b677a7aa5328715127dfdad56..2d6f0de7732529212bf4a9256bb3720386e39e71 100644 (file)
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 
 #else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
        *flushptr = 0;
        *flushptr = 0;
        *flushptr = 0;
+       barrier();
 }
 
 #endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 #define smp_mb()        fence()
 #define smp_rmb()       fence()
index f16477d1f571cb134a29e3afd13c480401f340ea..a8a37477c66e22a8e3c29ba2402ff6559a35bf27 100644 (file)
@@ -22,6 +22,8 @@
 /* Add an extra page of padding at the top of the stack for the guard page. */
 #define STACK_TOP      (TASK_SIZE - PAGE_SIZE)
 #define STACK_TOP_MAX  STACK_TOP
+/* Maximum virtual space for stack */
+#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
index 84e09feb4d546fef7004746a8d5550bddb3f8d58..ab78be2b6eb0503f939cc85409d532a3b037b491 100644 (file)
@@ -4,11 +4,11 @@ include include/uapi/asm-generic/Kbuild.asm
 header-y += byteorder.h
 header-y += ech.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += swab.h
 header-y += unistd.h
 
 generic-y += mman.h
+generic-y += resource.h
 generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 526d23c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _UAPI_METAG_RESOURCE_H
-#define _UAPI_METAG_RESOURCE_H
-
-#define _STK_LIM_MAX    (1 << 28)
-#include <asm-generic/resource.h>
-
-#endif /* _UAPI_METAG_RESOURCE_H */
index 5abf4e894216ac4b683772f6b187e4e9337eb2d5..2a66e908f6a9d9a276cad042ac793f692630255e 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
index f434b759e3b9aaa0f6eb9ab07f85fb783de4a6ad..ec606363b80677fd464b92c13d5a905871b21466 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 #include <asm/traps.h>
index 064ae7a76bdc204c28e0c8e92ccd63400fbbf403..ae73e42ac20b163331a77439a75270320a77dfa8 100644 (file)
@@ -6,4 +6,3 @@
 lib-y                  += init.o memory.o cmdline.o identify.o console.o
 
 lib-$(CONFIG_32BIT)    += locore.o
-lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
deleted file mode 100644 (file)
index 8c84981..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *     O32 interface for the 64 (or N32) ABI.
- *
- *     Copyright (C) 2002  Maciej W. Rozycki
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/* Maximum number of arguments supported.  Must be even!  */
-#define O32_ARGC       32
-/* Number of static registers we save.  */
-#define O32_STATC      11
-/* Frame size for both of the above.  */
-#define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
-
-               .text
-
-/*
- * O32 function call dispatcher, for interfacing 32-bit ROM routines.
- *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1-a7 -- its first seven arguments
- * and the stack -- remaining ones (up to O32_ARGC, including a1-a7).
- * Static registers, gp and fp are preserved, v0 holds a result.
- * This code relies on the called o32 function for sp and ra
- * restoration and thus both this dispatcher and the current stack
- * have to be placed in a KSEGx (or KUSEG) address space.  Any
- * pointers passed have to point to addresses within one of these
- * spaces as well.
- */
-NESTED(call_o32, O32_FRAMESZ, ra)
-               REG_SUBU        sp,O32_FRAMESZ
-
-               REG_S           ra,O32_FRAMESZ-1*SZREG(sp)
-               REG_S           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_S           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_S           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_S           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_S           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_S           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_S           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_S           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_S           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
-
-               move            jp,a0
-
-               sll             a0,a1,zero
-               sll             a1,a2,zero
-               sll             a2,a3,zero
-               sll             a3,a4,zero
-               sw              a5,0x10(sp)
-               sw              a6,0x14(sp)
-               sw              a7,0x18(sp)
-
-               PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x1c(sp)
-               li              t2,O32_ARGC-7
-1:
-               lw              t3,(t0)
-               REG_ADDU        t0,SZREG
-               sw              t3,(t1)
-               REG_SUBU        t2,1
-               REG_ADDU        t1,4
-               bnez            t2,1b
-
-               jalr            jp
-
-               REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
-               REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_L           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_L           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_L           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_L           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_L           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_L           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_L           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_L           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_L           ra,O32_FRAMESZ-1*SZREG(sp)
-
-               REG_ADDU        sp,O32_FRAMESZ
-               jr              ra
-END(call_o32)
index b308b2a0613e210c1f7b068dfc6a7589f1ad2afb..4703fe4dbd9a7b6c192ce3e86d0c5bceada460f1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     O32 interface for the 64 (or N32) ABI.
  *
- *     Copyright (C) 2002  Maciej W. Rozycki
+ *     Copyright (C) 2002, 2014  Maciej W. Rozycki
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
 #include <asm/asm.h>
 #include <asm/regdef.h>
 
+/* O32 register size.  */
+#define O32_SZREG      4
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save. */
+/* Number of static registers we save.  */
 #define O32_STATC      11
-/* Frame size for static register  */
-#define O32_FRAMESZ    (SZREG * O32_STATC)
-/* Frame size on new stack */
-#define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC)
+/* Argument area frame size.  */
+#define O32_ARGSZ      (O32_SZREG * O32_ARGC)
+/* Static register save area frame size.  */
+#define O32_STATSZ     (SZREG * O32_STATC)
+/* Stack pointer register save area frame size.  */
+#define O32_SPSZ       SZREG
+/* Combined area frame size.  */
+#define O32_FRAMESZ    (O32_ARGSZ + O32_SPSZ + O32_STATSZ)
+/* Switched stack frame size.  */
+#define O32_NFRAMESZ   (O32_ARGSZ + O32_SPSZ)
 
                .text
 
 /*
  * O32 function call dispatcher, for interfacing 32-bit ROM routines.
  *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1 a new stack pointer, a2-a7 -- its
- * first six arguments and the stack -- remaining ones (up to O32_ARGC,
- * including a2-a7). Static registers, gp and fp are preserved, v0 holds
- * a result. This code relies on the called o32 function for sp and ra
- * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG)
- * address space.  Any pointers passed have to point to addresses within
- * one of these spaces as well.
+ * The standard 64 (N32) calling sequence is supported, with a0 holding
+ * a function pointer, a1 a pointer to the new stack to call the
+ * function with or 0 if no stack switching is requested, a2-a7 -- the
+ * function call's first six arguments, and the stack -- the remaining
+ * arguments (up to O32_ARGC, including a2-a7).  Static registers, gp
+ * and fp are preserved, v0 holds the result.  This code relies on the
+ * called o32 function for sp and ra restoration and this dispatcher has
+ * to be placed in a KSEGx (or KUSEG) address space.  Any pointers
+ * passed have to point to addresses within one of these spaces as well.
  */
 NESTED(call_o32, O32_FRAMESZ, ra)
                REG_SUBU        sp,O32_FRAMESZ
@@ -51,32 +60,36 @@ NESTED(call_o32, O32_FRAMESZ, ra)
                REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
 
                move            jp,a0
-               REG_SUBU        s0,a1,O32_FRAMESZ_NEW
-               REG_S           sp,O32_FRAMESZ_NEW-1*SZREG(s0)
+
+               move            fp,sp
+               beqz            a1,0f
+               REG_SUBU        fp,a1,O32_NFRAMESZ
+0:
+               REG_S           sp,O32_NFRAMESZ-1*SZREG(fp)
 
                sll             a0,a2,zero
                sll             a1,a3,zero
                sll             a2,a4,zero
                sll             a3,a5,zero
-               sw              a6,0x10(s0)
-               sw              a7,0x14(s0)
+               sw              a6,4*O32_SZREG(fp)
+               sw              a7,5*O32_SZREG(fp)
 
                PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x18(s0)
+               PTR_LA          t1,6*O32_SZREG(fp)
                li              t2,O32_ARGC-6
 1:
                lw              t3,(t0)
                REG_ADDU        t0,SZREG
                sw              t3,(t1)
                REG_SUBU        t2,1
-               REG_ADDU        t1,4
+               REG_ADDU        t1,O32_SZREG
                bnez            t2,1b
 
-               move            sp,s0
+               move            sp,fp
 
                jalr            jp
 
-               REG_L           sp,O32_FRAMESZ_NEW-1*SZREG(sp)
+               REG_L           sp,O32_NFRAMESZ-1*SZREG(sp)
 
                REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
                REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
index 2c2cb182af4edd8673dc54c00c39aded85a6ed30..6aa264b9856ac99b71b88d54fae19cea27d782de 100644 (file)
@@ -40,7 +40,8 @@
 
 #ifdef CONFIG_64BIT
 
-static u8 o32_stk[16384];
+/* O32 stack has to be 8-byte aligned. */
+static u64 o32_stk[4096];
 #define O32_STK          &o32_stk[sizeof(o32_stk)]
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
index c0ead63138453c04d3b20918fbcfb1e1c02c5c3c..b59a2103b61a3f64efd75b29f034e8208c155012 100644 (file)
@@ -113,31 +113,31 @@ extern int (*__pmax_close)(int);
 #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                 __asm__(#fun " = call_o32")
 
-int __DEC_PROM_O32(_rex_bootinit, (int (*)(void)));
-int __DEC_PROM_O32(_rex_bootread, (int (*)(void)));
-int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *));
+int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
 unsigned long *__DEC_PROM_O32(_rex_slot_address,
-                            (unsigned long *(*)(int), int));
-void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void)));
-int __DEC_PROM_O32(_rex_getsysid, (int (*)(void)));
-void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void)));
-
-int __DEC_PROM_O32(_prom_getchar, (int (*)(void)));
-char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *));
-int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...));
-
-
-#define rex_bootinit()         _rex_bootinit(__rex_bootinit)
-#define rex_bootread()         _rex_bootread(__rex_bootread)
-#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, x)
-#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, x)
-#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo)
-#define rex_getsysid()         _rex_getsysid(__rex_getsysid)
-#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache)
-
-#define prom_getchar()         _prom_getchar(__prom_getchar)
-#define prom_getenv(x)         _prom_getenv(__prom_getenv, x)
-#define prom_printf(x...)      _prom_printf(__prom_printf, x)
+                            (unsigned long *(*)(int), void *, int));
+void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
+int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
+void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
+
+int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
+char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
+int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
+
+
+#define rex_bootinit()         _rex_bootinit(__rex_bootinit, NULL)
+#define rex_bootread()         _rex_bootread(__rex_bootread, NULL)
+#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, NULL, x)
+#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, NULL, x)
+#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo, NULL)
+#define rex_getsysid()         _rex_getsysid(__rex_getsysid, NULL)
+#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache, NULL)
+
+#define prom_getchar()         _prom_getchar(__prom_getchar, NULL)
+#define prom_getenv(x)         _prom_getenv(__prom_getenv, NULL, x)
+#define prom_printf(x...)      _prom_printf(__prom_printf, NULL, x)
 
 #else /* !CONFIG_64BIT */
 
diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h
deleted file mode 100644 (file)
index b0b80d9..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (C) 2004 by Basler Vision Technologies AG
- *  Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#if !defined(_ASM_RM9K_OCD_H)
-#define _ASM_RM9K_OCD_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-extern volatile void __iomem * const ocd_base;
-extern volatile void __iomem * const titan_base;
-
-#define ocd_addr(__x__)                (ocd_base + (__x__))
-#define titan_addr(__x__)      (titan_base + (__x__))
-#define scram_addr(__x__)      (scram_base + (__x__))
-
-/* OCD register access */
-#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__))
-#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__))
-#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__))
-#define ocd_writel(__val__, __offs__) \
-       __raw_writel((__val__), ocd_addr(__offs__))
-#define ocd_writew(__val__, __offs__) \
-       __raw_writew((__val__), ocd_addr(__offs__))
-#define ocd_writeb(__val__, __offs__) \
-       __raw_writeb((__val__), ocd_addr(__offs__))
-
-/* TITAN register access - 32 bit-wide only */
-#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__))
-#define titan_writel(__val__, __offs__) \
-       __raw_writel((__val__), titan_addr(__offs__))
-
-/* Protect access to shared TITAN registers */
-extern spinlock_t titan_lock;
-extern int titan_irqflags;
-#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags)
-#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags)
-
-#endif /* !defined(_ASM_RM9K_OCD_H) */
index c6e9cd2bca8dbf7de5512c9be8de0f2b92db7bd4..17960fe7a8ce4ef21b7a94cca10c7b5af61cec17 100644 (file)
@@ -133,6 +133,8 @@ static inline int syscall_get_arch(void)
 #ifdef CONFIG_64BIT
        if (!test_thread_flag(TIF_32BIT_REGS))
                arch |= __AUDIT_ARCH_64BIT;
+       if (test_thread_flag(TIF_32BIT_ADDR))
+               arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
index df6e775f3fef524e8d049c24433eff06dade0fd7..3125797f2a88a6bc3e44cdc215d416d604e7070d 100644 (file)
@@ -484,13 +484,13 @@ enum MIPS6e_i8_func {
  * Damn ...  bitfields depend from byteorder :-(
  */
 #ifdef __MIPSEB__
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        field;                                                          \
        more
 
 #elif defined(__MIPSEL__)
 
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        more                                                            \
        field;
 
@@ -499,112 +499,112 @@ enum MIPS6e_i8_func {
 #endif
 
 struct j_format {
-       BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
-       BITFIELD_FIELD(unsigned int target : 26,
+       __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+       __BITFIELD_FIELD(unsigned int target : 26,
        ;))
 };
 
 struct i_format {                      /* signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct u_format {                      /* unsigned immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int uimmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int uimmediate : 16,
        ;))))
 };
 
 struct c_format {                      /* Cache (>= R6000) format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int c_op : 3,
-       BITFIELD_FIELD(unsigned int cache : 2,
-       BITFIELD_FIELD(unsigned int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int c_op : 3,
+       __BITFIELD_FIELD(unsigned int cache : 2,
+       __BITFIELD_FIELD(unsigned int simmediate : 16,
        ;)))))
 };
 
 struct r_format {                      /* Register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct p_format {              /* Performance counter format (R10000) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct f_format {                      /* FPU register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int : 1,
-       BITFIELD_FIELD(unsigned int fmt : 4,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int : 1,
+       __BITFIELD_FIELD(unsigned int fmt : 4,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct ma_format {             /* FPU multiply and add format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
        ;)))))))
 };
 
 struct b_format {                      /* BREAK and SYSCALL */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int code : 20,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int code : 20,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))
 };
 
 struct ps_format {                     /* MIPS-3D / paired single format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct v_format {                              /* MDMX vector format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int sel : 4,
-       BITFIELD_FIELD(unsigned int fmt : 1,
-       BITFIELD_FIELD(unsigned int vt : 5,
-       BITFIELD_FIELD(unsigned int vs : 5,
-       BITFIELD_FIELD(unsigned int vd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int sel : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 1,
+       __BITFIELD_FIELD(unsigned int vt : 5,
+       __BITFIELD_FIELD(unsigned int vs : 5,
+       __BITFIELD_FIELD(unsigned int vd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct spec3_format {   /* SPEC3 */
-       BITFIELD_FIELD(unsigned int opcode:6,
-       BITFIELD_FIELD(unsigned int rs:5,
-       BITFIELD_FIELD(unsigned int rt:5,
-       BITFIELD_FIELD(signed int simmediate:9,
-       BITFIELD_FIELD(unsigned int func:7,
+       __BITFIELD_FIELD(unsigned int opcode:6,
+       __BITFIELD_FIELD(unsigned int rs:5,
+       __BITFIELD_FIELD(unsigned int rt:5,
+       __BITFIELD_FIELD(signed int simmediate:9,
+       __BITFIELD_FIELD(unsigned int func:7,
        ;)))))
 };
 
@@ -616,141 +616,141 @@ struct spec3_format {   /* SPEC3 */
  *     if it is MIPS32 instruction re-encoded for use in the microMIPS ASE.
  */
 struct fb_format {             /* FPU branch format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int bc : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int flag : 2,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int bc : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int flag : 2,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;)))))
 };
 
 struct fp0_format {            /* FPU multiply and add format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fmt : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fmt : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp0_format {         /* FPU multipy and add format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 2,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 2,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 8,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 8,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int zero : 2,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 3,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int zero : 2,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 3,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))))
 };
 
 struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 7,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 7,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int cond : 4,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int cond : 4,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_i_format {           /* Immediate format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(signed int simmediate : 12,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(signed int simmediate : 12,
        ;)))))
 };
 
 struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int func : 11,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int func : 11,
        ;)))))
 };
 
@@ -758,51 +758,51 @@ struct mm_x_format {              /* Scaled indexed load format (microMIPS) */
  * microMIPS instruction formats (16-bit length)
  */
 struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(signed int simmediate : 10,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(signed int simmediate : 10,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))
 };
 
 struct mm_b1_format {          /* Conditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_m_format {         /* Multi-word load/store format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int rlist : 2,
-       BITFIELD_FIELD(unsigned int imm : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int rlist : 2,
+       __BITFIELD_FIELD(unsigned int imm : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_rb_format {                /* Signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(unsigned int base : 3,
-       BITFIELD_FIELD(signed int simmediate : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(unsigned int base : 3,
+       __BITFIELD_FIELD(signed int simmediate : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_r3_format {                /* Load from global pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_r5_format {                /* Load/store from stack pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 5,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 5,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
@@ -810,57 +810,57 @@ struct mm16_r5_format {           /* Load/store from stack pointer format */
  * MIPS16e instruction formats (16-bit length)
  */
 struct m16e_rr {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int nd : 1,
-       BITFIELD_FIELD(unsigned int l : 1,
-       BITFIELD_FIELD(unsigned int ra : 1,
-       BITFIELD_FIELD(unsigned int func : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int nd : 1,
+       __BITFIELD_FIELD(unsigned int l : 1,
+       __BITFIELD_FIELD(unsigned int ra : 1,
+       __BITFIELD_FIELD(unsigned int func : 5,
        ;))))))
 };
 
 struct m16e_jal {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int x : 1,
-       BITFIELD_FIELD(unsigned int imm20_16 : 5,
-       BITFIELD_FIELD(signed int imm25_21 : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int x : 1,
+       __BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       __BITFIELD_FIELD(signed int imm25_21 : 5,
        ;))))
 };
 
 struct m16e_i64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_ri64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_ri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_rri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_i8 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
index d6e154a9e6a55ef98d964f71629a129f6fd04d27..2692abb28e3637db7705217de88239fb968b2935 100644 (file)
 #define __NR_finit_module              (__NR_Linux + 348)
 #define __NR_sched_setattr             (__NR_Linux + 349)
 #define __NR_sched_getattr             (__NR_Linux + 350)
+#define __NR_renameat2                 (__NR_Linux + 351)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            350
+#define __NR_Linux_syscalls            351
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_getdents64                        (__NR_Linux + 308)
 #define __NR_sched_setattr             (__NR_Linux + 309)
 #define __NR_sched_getattr             (__NR_Linux + 310)
+#define __NR_renameat2                 (__NR_Linux + 311)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            310
+#define __NR_Linux_syscalls            311
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_finit_module              (__NR_Linux + 312)
 #define __NR_sched_setattr             (__NR_Linux + 313)
 #define __NR_sched_getattr             (__NR_Linux + 314)
+#define __NR_renameat2                 (__NR_Linux + 315)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            314
+#define __NR_Linux_syscalls            315
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
index e40971b51d2f0bf47e3eb217f43c652c732e7a09..037a44d962f37e1b94251f13b054103e6cbb1ff5 100644 (file)
@@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        seq_printf(m, "kscratch registers\t: %d\n",
                      hweight8(cpu_data[n].kscratch_mask));
        seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-       if (cpu_has_mipsmt) {
-               seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-#if defined(CONFIG_MIPS_MT_SMTC)
-               seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-#endif
-       }
-#endif
+
        sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
                      cpu_has_vce ? "%u" : "not available");
        seq_printf(m, fmt, 'D', vced_count);
index fdc70b40044265fb2107df186eb0e593a96b9571..3245474f19d5cdec5563a3841ecdcd3760f98e17 100644 (file)
@@ -577,3 +577,4 @@ EXPORT(sys_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
index dd99c3285aeae75f65ae982de46a755ae953f140..be2fedd4ae33193937010b376e62c7d9e327022d 100644 (file)
@@ -430,4 +430,5 @@ EXPORT(sys_call_table)
        PTR     sys_getdents64
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 5310 */
+       PTR     sys_renameat2
        .size   sys_call_table,.-sys_call_table
index f68d2f4f009021de3ed784e9733aee5d3580d8e0..c1dbcda4b816844cc64821aa8d64a79d5c9a38f3 100644 (file)
@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr
+       PTR     sys_renameat2                   /* 6315 */
        .size   sysn32_call_table,.-sysn32_call_table
index 70f6acecd928896c54b586b3470c27eb703721be..f1343ccd7ed7e58d14563c4413ede6517ff60853 100644 (file)
@@ -556,4 +556,5 @@ EXPORT(sys32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
        .size   sys32_call_table,.-sys32_call_table
index fac1f5b178ebe3c558ad369a9e1243340da04c80..143b8a37b5e41358f3faa814375869b058c9fee7 100644 (file)
@@ -8,6 +8,7 @@
        };
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2e4825e483882b217046caf4a847a99c3db56afe..9901237563c58922d213b9af10b9ed3c504b18e5 100644 (file)
 #define UNIT(unit)  ((unit)*NBYTES)
 
 #define ADDC(sum,reg)                                          \
+       .set    push;                                           \
+       .set    noat;                                           \
        ADD     sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        ADD     sum, v1;                                        \
+       .set    pop
 
 #define ADDC32(sum,reg)                                                \
+       .set    push;                                           \
+       .set    noat;                                           \
        addu    sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        addu    sum, v1;                                        \
+       .set    pop
 
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)   \
        LOAD    _t0, (offset + UNIT(0))(src);                   \
@@ -710,6 +716,8 @@ LEAF(csum_partial)
        ADDC(sum, t2)
 .Ldone\@:
        /* fold checksum */
+       .set    push
+       .set    noat
 #ifdef USE_DOUBLE
        dsll32  v1, sum, 0
        daddu   sum, v1
@@ -732,6 +740,7 @@ LEAF(csum_partial)
        or      sum, sum, t0
 1:
 #endif
+       .set    pop
        .set reorder
        ADDC32(sum, psum)
        jr      ra
index 44713af15a62bc60ebc53ffcbc9e73ad10d87380..705cfb7c1a74e0843e40567219024882ab932ac9 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1994 by Waldorf Electronics
  * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
  */
 #include <linux/module.h>
 #include <linux/param.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
 void __delay(unsigned long loops)
 {
        __asm__ __volatile__ (
@@ -22,13 +28,13 @@ void __delay(unsigned long loops)
        "       .align  3                                       \n"
        "1:     bnez    %0, 1b                                  \n"
 #if BITS_PER_LONG == 32
-       "       subu    %0,                                   \n"
+       "       subu    %0, %1                                  \n"
 #else
-       "       dsubu   %0,                                   \n"
+       "       dsubu   %0, %1                                  \n"
 #endif
        "       .set    reorder                                 \n"
        : "=r" (loops)
-       : "0" (loops));
+       : GCC_DADDI_IMM_ASM() (1), "0" (loops));
 }
 EXPORT_SYMBOL(__delay);
 
index d3301cd1e9a51b4c387a7f8c3d4a46e2632761e2..3c32baf8b49447a591e6e552e634eafe5be8d09e 100644 (file)
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
        bnez            v0, .Lfault\@
 
 FEXPORT(__strncpy_from_\func\()_nocheck_asm)
-       .set            noreorder
        move            t0, zero
        move            v1, a1
 .ifeqs "\func","kernel"
@@ -45,21 +44,21 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
 .endif
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
+       sb              v0, (a0)
        beqz            v0, 2f
-        sb             v0, (a0)
        PTR_ADDIU       t0, 1
+       PTR_ADDIU       a0, 1
        bne             t0, a2, 1b
-        PTR_ADDIU      a0, 1
 2:     PTR_ADDU        v0, a1, t0
        xor             v0, a1
        bltz            v0, .Lfault\@
-        nop
+       move            v0, t0
        jr              ra                      # return n
-        move           v0, t0
        END(__strncpy_from_\func\()_asm)
 
-.Lfault\@: jr          ra
-         li            v0, -EFAULT
+.Lfault\@:
+       li              v0, -EFAULT
+       jr              ra
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault\@
index 7397be226a06a2a7d0481fac7b2dd476d4fe6d3a..603d79a95f4778e40d5ec94b78f1efa9750cd214 100644 (file)
@@ -64,7 +64,6 @@ config LEMOTE_MACH3A
        bool "Lemote Loongson 3A family machines"
        select ARCH_SPARSEMEM_ENABLE
        select GENERIC_ISA_DMA_SUPPORT_BROKEN
-       select GENERIC_HARDIRQS_NO__DO_IRQ
        select BOOT_ELF32
        select BOARD_SCACHE
        select CSRC_R4K
index e1f427f4f5f3fed4985bb370054421d7d2f91cdc..67dd94ef28e60f4023b16f7da8fac716c558b3bf 100644 (file)
@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
 
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
+       unsigned int rate_khz = rate / 1000;
        int ret = 0;
        int regval;
        int i;
@@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
                if (loongson2_clockmod_table[i].frequency ==
                    CPUFREQ_ENTRY_INVALID)
                        continue;
-               if (rate == loongson2_clockmod_table[i].frequency)
+               if (rate_khz == loongson2_clockmod_table[i].frequency)
                        break;
        }
-       if (rate != loongson2_clockmod_table[i].frequency)
+       if (rate_khz != loongson2_clockmod_table[i].frequency)
                return -ENOTSUPP;
 
        clk->rate = rate;
index 30a494db99c2a0eb4d51aa64ca410de956801837..a5427c6e97574c6a1a0dbad628f7a75a3feed516 100644 (file)
 
 #define FASTPATH_SIZE  128
 
+EXPORT(tlbmiss_handler_setup_pgd_start)
 LEAF(tlbmiss_handler_setup_pgd)
-       .space          16 * 4
+1:     j       1b              /* Dummy, will be replaced. */
+       .space  64
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
 
index ee88367ab3addcda0e4aba412f524493dddd079a..f99ec587b151919bd90437687f08614c24aea32e 100644 (file)
@@ -1422,16 +1422,17 @@ static void build_r4000_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
+extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
 {
        const int a0 = 4;
        const int __maybe_unused a1 = 5;
        const int __maybe_unused a2 = 6;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_start;
        const int tlbmiss_handler_setup_pgd_size =
-               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
+               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
 #endif
index 35eb874ab7f11b25248e40714c2e477ec0f1d4a3..709f58132f5cba9d87adc37afbe84c38db40a5ac 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink MT7620A evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 322d7002595bda983b95f9bb77a17de24980c001..0a685db093d4dbd367228a1cb9e4d42bdf43e0e7 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT2880 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x8000000 0x2000000>;
        };
 
index 0ac73ea281984909df89403d17fb82fd4e9f3ae4..ec9e9a03554140a4c11947b2ff0a7548cb4fb7fb 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3052 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2fa6b330bf4f2fcb8d6f63f4b8df55cb1fe6b065..e8df21a5d10d9eb35a2dfc88af0d137d9c66156e 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3883 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 1faefed32749c93ff31a7d6237732d3dd6c55f26..108d48e652af4c802da1676b18252a5394d30295 100644 (file)
@@ -22,6 +22,7 @@ config PARISC
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
        select SYSCTL_ARCH_UNALIGN_ALLOW
+       select SYSCTL_EXCEPTION_TRACE
        select HAVE_MOD_ARCH_SPECIFIC
        select VIRT_TO_BUS
        select MODULES_USE_ELF_RELA
index 198a86feb5748fc595f7230941485af0bc84a76a..d951c9681ab316aa2be88cd65b4d18ae62f958cb 100644 (file)
 #define STACK_TOP      TASK_SIZE
 #define STACK_TOP_MAX  DEFAULT_TASK_SIZE
 
+/* Allow bigger stacks for 64-bit processes */
+#define STACK_SIZE_MAX (USER_WIDE_MODE                                 \
+                        ? (1 << 30)    /* 1 GB */                      \
+                        : (CONFIG_MAX_STACK_SIZE_MB*1024*1024))
+
 #endif
 
 #ifndef __ASSEMBLY__
index 265ae5190b0a70e4dc2539d9a661e1e3d2deda05..47e0e21d2272468bbc864e6e30ced36c18929534 100644 (file)
 #define __NR_sched_setattr     (__NR_Linux + 334)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
+#define __NR_renameat2         (__NR_Linux + 337)
 
-#define __NR_Linux_syscalls    (__NR_utimes + 1)
+#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 31ffa9b5532216620d9a6106d2d4b76e7501ee0b..e1ffea2f9a0b05ccda844969dcb7c519ab17077a 100644 (file)
@@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void)
 {
        unsigned long stack_base;
 
-       /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+       /* Limit stack size - see setup_arg_pages() in fs/exec.c */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        return PAGE_ALIGN(STACK_TOP - stack_base);
 }
index a63bb179f79a1fcd56a7bcf1adbe759f46587b71..83878601103701df4497913c89d5702897249a38 100644 (file)
@@ -589,10 +589,13 @@ cas_nocontend:
 # endif
 /* ENABLE_LWS_DEBUG */
 
+       rsm     PSW_SM_I, %r0                           /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
        LDCW    0(%sr2,%r20), %r28                      /* Try to acquire the lock */
        cmpb,<>,n       %r0, %r28, cas_action           /* Did we get it? */
 cas_wouldblock:
        ldo     2(%r0), %r28                            /* 2nd case */
+       ssm     PSW_SM_I, %r0
        b       lws_exit                                /* Contended... */
        ldo     -EAGAIN(%r0), %r21                      /* Spin in userspace */
 
@@ -619,15 +622,17 @@ cas_action:
        stw     %r1, 4(%sr2,%r20)
 #endif
        /* The load and store could fail */
-1:     ldw     0(%sr3,%r26), %r28
+1:     ldw,ma  0(%sr3,%r26), %r28
        sub,<>  %r28, %r25, %r0
-2:     stw     %r24, 0(%sr3,%r26)
+2:     stw,ma  %r24, 0(%sr3,%r26)
        /* Free lock */
-       stw     %r20, 0(%sr2,%r20)
+       stw,ma  %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
 #endif
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
        b       lws_exit
        copy    %r0, %r21
@@ -639,6 +644,7 @@ cas_action:
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
 #endif
+       ssm     PSW_SM_I, %r0
        b       lws_exit
        ldo     -EFAULT(%r0),%r21       /* set errno */
        nop
index 83ead0ea127d23cdcb10115a5b04d76f40e10612..c5fa7a697fba2a13b0779e59904ec7c1428e8ef4 100644 (file)
        ENTRY_SAME(sched_setattr)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
+       ENTRY_SAME(renameat2)
 
        /* Nothing yet */
 
index 1cd1d0c83b6d7bd7a21d0a22c57e18f2ac27f65a..47ee620d15d27850ab8ebac1f739dfd3215dae9b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/console.h>
 #include <linux/bug.h>
+#include <linux/ratelimit.h>
 
 #include <asm/assembly.h>
 #include <asm/uaccess.h>
@@ -42,9 +43,6 @@
 
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                         /*  dumped to the console via printk)          */
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 DEFINE_SPINLOCK(pa_dbit_lock);
 #endif
@@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs)
        }
 }
 
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+       DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+#define parisc_printk_ratelimited(critical, regs, fmt, ...)    {             \
+       if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+               printk(fmt, ##__VA_ARGS__);                                   \
+               show_regs(regs);                                              \
+       }                                                                     \
+}
+
+
 static void do_show_stack(struct unwind_frame_info *info)
 {
        int i = 1;
@@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
                if (err == 0)
                        return; /* STFU */
 
-               printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+               parisc_printk_ratelimited(1, regs,
+                       KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
-#ifdef PRINT_USER_FAULTS
-               /* XXX for debugging only */
-               show_regs(regs);
-#endif
+
                return;
        }
 
@@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }
 
-#ifdef PRINT_USER_FAULTS
-       if (unlikely(iir != GDB_BREAK_INSN)) {
-               printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+       if (unlikely(iir != GDB_BREAK_INSN))
+               parisc_printk_ratelimited(0, regs,
+                       KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
-               show_regs(regs);
-       }
-#endif
 
        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
@@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        default:
                if (user_mode(regs)) {
-#ifdef PRINT_USER_FAULTS
-                       printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
-                           task_pid_nr(current), current->comm);
-                       show_regs(regs);
-#endif
+                       parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "handle_interruption() pid=%d command='%s'\n",
+                               task_pid_nr(current), current->comm);
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
@@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        if (user_mode(regs)) {
            if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
-#ifdef PRINT_USER_FAULTS
-               if (fault_space == 0)
-                       printk(KERN_DEBUG "User Fault on Kernel Space ");
-               else
-                       printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
-                              code);
-               printk(KERN_CONT "pid=%d command='%s'\n",
-                      task_pid_nr(current), current->comm);
-               show_regs(regs);
-#endif
+               parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+                               code, fault_space,
+                               task_pid_nr(current), current->comm);
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                si.si_code = SEGV_MAPERR;
index 747550762f3ca25acf6dabdfcc7d8aef9de31779..3ca9c1131cfe0d80b9b12fb5c0e599a3363942c0 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/traps.h>
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                        /*  dumped to the console via printk)          */
-
-
 /* Various important other fields */
 #define bit22set(x)            (x & 0x00000200)
 #define bits23_25set(x)                (x & 0x000001c0)
@@ -34,6 +30,8 @@
 
 DEFINE_PER_CPU(struct exception_data, exception_data);
 
+int show_unhandled_signals = 1;
+
 /*
  * parisc_acctyp(unsigned int inst) --
  *    Given a PA-RISC memory access instruction, determine if the
@@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs)
        return 0;
 }
 
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+               unsigned long address, struct task_struct *tsk,
+               struct vm_area_struct *vma)
+{
+       if (!unhandled_signal(tsk, SIGSEGV))
+               return;
+
+       if (!printk_ratelimit())
+               return;
+
+       pr_warn("\n");
+       pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+           tsk->comm, code, address);
+       print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+       if (vma)
+               pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+                               vma->vm_start, vma->vm_end);
+
+       show_regs(regs);
+}
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
@@ -270,16 +294,8 @@ bad_area:
        if (user_mode(regs)) {
                struct siginfo si;
 
-#ifdef PRINT_USER_FAULTS
-               printk(KERN_DEBUG "\n");
-               printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
-                   task_pid_nr(tsk), tsk->comm, code, address);
-               if (vma) {
-                       printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
-                                       vma->vm_start, vma->vm_end);
-               }
-               show_regs(regs);
-#endif
+               show_signal_msg(regs, code, address, tsk, vma);
+
                switch (code) {
                case 15:        /* Data TLB miss fault/Data page fault */
                        /* send SIGSEGV when outside of vma */
index 4c0cedf4e2c7dda768378223deb024c03a94b7c9..ce4c68a4a8236a4feb70c2b4a7291079d519dc98 100644 (file)
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS        += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS        += -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS  += -Iarch/$(ARCH)
 KBUILD_CFLAGS  += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP            = $(CC) -E $(KBUILD_CFLAGS)
index 6586a40a46ce161f9f654a57d0222db3bc06d611..cded7c1278ef0c419b134cabc74292209391b5c6 100644 (file)
@@ -318,11 +318,16 @@ n:
        addi    reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)           \
        lis     reg,(expr)@highest;             \
        ori     reg,reg,(expr)@higher;  \
        rldicr  reg,reg,32,31;          \
-       oris    reg,reg,(expr)@h;               \
+       oris    reg,reg,(expr)@__AS_ATHIGH;     \
        ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)                        \
index d0e784e0ff484f0053f807e081a4c89fec13dee6..52179033067262f8f107be29c16b1fba6b8d4cee 100644 (file)
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
                (unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+       extern char kvm_tmp[];
+       return start < (unsigned long)kvm_tmp &&
+               (unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+       return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
index 6a0175297b0d8e92495ce97e7274ecba72fcb790..dd8695f6cb6d2c6de9bca80b073f4f40013cdeef 100644 (file)
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN                0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
index 59d229a2a3e08dfcd1f18ca94e56cd00e787c987..879b3aacac3282d8b9de5101c9349d2aea9b8edc 100644 (file)
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
                if (!cpu_online(cpu)) {
                        printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
                               cpu);
-                       cpu_up(cpu);
+                       WARN_ON(cpu_up(cpu));
                }
        }
 }
index 122a580f732246c02c5e31c301078cb725435919..7e711bdcc6da5adb399e8ac0a55097e76317fe51 100644 (file)
@@ -813,9 +813,6 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
-       /* Don't adjust the decrementer if some irq work is pending */
-       if (test_irq_work_pending())
-               return 0;
        __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
        set_dec(evt);
 
index 94e597e6f15cd08f3f4f7deb5bab73d2d3ff1f27..7af190a266b388167693163f35eb5d249eee5d55 100644 (file)
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
 #endif
        return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
 #endif
        kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
index 1d6c56ad5b605f240770ed8ab6483f8b3fa00408..8fcc36306a02153dab0234e9f60558587715333f 100644 (file)
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                pte_size = psize;
                pte = lookup_linux_pte_and_update(pgdir, hva, writing,
                                                  &pte_size);
-               if (pte_present(pte)) {
+               if (pte_present(pte) && !pte_numa(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
index b031f932c0cc3dcc0c452c88f8ba2c3c88cf641d..07c8b5b0f9d256d80ef4853c741a882f7e4f560f 100644 (file)
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        mr      r3, r9
        bl      kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+       b       2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+       /* Turn on TM. */
+       mfmsr   r8
+       li      r0, 1
+       rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+       mtmsrd  r8
+
+       ld      r5, VCPU_MSR(r9)
+       rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+       beq     1f      /* TM not active in guest. */
+
+       li      r3, TM_CAUSE_KVM_RESCHED
+
+       /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /* All GPRs are volatile at this point. */
+       TRECLAIM(R3)
+
+       /* Temporarily store r13 and r9 so we have some regs to play with */
+       SET_SCRATCH0(r13)
+       GET_PACA(r13)
+       std     r9, PACATMSCRATCH(r13)
+       ld      r9, HSTATE_KVM_VCPU(r13)
+
+       /* Get a few more GPRs free. */
+       std     r29, VCPU_GPRS_TM(29)(r9)
+       std     r30, VCPU_GPRS_TM(30)(r9)
+       std     r31, VCPU_GPRS_TM(31)(r9)
+
+       /* Save away PPR and DSCR soon so don't run with user values. */
+       mfspr   r31, SPRN_PPR
+       HMT_MEDIUM
+       mfspr   r30, SPRN_DSCR
+       ld      r29, HSTATE_DSCR(r13)
+       mtspr   SPRN_DSCR, r29
+
+       /* Save all but r9, r13 & r29-r31 */
+       reg = 0
+       .rept   29
+       .if (reg != 9) && (reg != 13)
+       std     reg, VCPU_GPRS_TM(reg)(r9)
+       .endif
+       reg = reg + 1
+       .endr
+       /* ... now save r13 */
+       GET_SCRATCH0(r4)
+       std     r4, VCPU_GPRS_TM(13)(r9)
+       /* ... and save r9 */
+       ld      r4, PACATMSCRATCH(r13)
+       std     r4, VCPU_GPRS_TM(9)(r9)
+
+       /* Reload stack pointer and TOC. */
+       ld      r1, HSTATE_HOST_R1(r13)
+       ld      r2, PACATOC(r13)
+
+       /* Set MSR RI now we have r1 and r13 back. */
+       li      r5, MSR_RI
+       mtmsrd  r5, 1
+
+       /* Save away checkpinted SPRs. */
+       std     r31, VCPU_PPR_TM(r9)
+       std     r30, VCPU_DSCR_TM(r9)
+       mflr    r5
+       mfcr    r6
+       mfctr   r7
+       mfspr   r8, SPRN_AMR
+       mfspr   r10, SPRN_TAR
+       std     r5, VCPU_LR_TM(r9)
+       stw     r6, VCPU_CR_TM(r9)
+       std     r7, VCPU_CTR_TM(r9)
+       std     r8, VCPU_AMR_TM(r9)
+       std     r10, VCPU_TAR_TM(r9)
+
+       /* Restore r12 as trap number. */
+       lwz     r12, VCPU_TRAP(r9)
+
+       /* Save FP/VSX. */
+       addi    r3, r9, VCPU_FPRS_TM
+       bl      .store_fp_state
+       addi    r3, r9, VCPU_VRS_TM
+       bl      .store_vr_state
+       mfspr   r6, SPRN_VRSAVE
+       stw     r6, VCPU_VRSAVE_TM(r9)
+1:
+       /*
+        * We need to save these SPRs after the treclaim so that the software
+        * error code is recorded correctly in the TEXASR.  Also the user may
+        * change these outside of a transaction, so they must always be
+        * context switched.
+        */
+       mfspr   r5, SPRN_TFHAR
+       mfspr   r6, SPRN_TFIAR
+       mfspr   r7, SPRN_TEXASR
+       std     r5, VCPU_TFHAR(r9)
+       std     r6, VCPU_TFIAR(r9)
+       std     r7, VCPU_TEXASR(r9)
+2:
+#endif
+
        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        cmpdi   r8, 0
index c5c052a9729c95d14591087fe71b20168237dc5d..02f1defd8bb9aa092dca510f4c80edeb45fafce6 100644 (file)
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 #endif
        vfree(vcpu_book3s);
index d766d6ee33fe6889e5a96b3898f8c47b0050fc14..06ba83b036d3360bb8750b1a869dfcf9e09705d0 100644 (file)
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;
 
+               /* Make kvm guest trampolines executable */
+               if (overlaps_kvm_tmp(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
+
                /*
                 * If relocatable, check if it overlaps interrupt vectors that
                 * are copied down to real 0. For relocatable kernel
index 253fefe3d1a0e76fd4ed3996711b6f1784725d6c..5b51079f3e3ba52f7ecb51c3fb27a5d7255ebb48 100644 (file)
@@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
-               if (pci_is_root_bus(bus))
+               if (pci_is_root_bus(bus) ||
+                   pci_is_root_bus(bus->parent))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(hose, bus->self, option);
index cf3c0089bef253d7817a604f9a7f195a4ffd9bc0..23223cd63e54811d9ab85823df1e03303c88036e 100644 (file)
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
index 0a5aac8a9412b64c4b2c43f9999e0f1e2f3815cb..7acb77f7ef1ada0183280c07e75f2c34579bac72 100644 (file)
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
        }
        /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
        if (nbytes) {
index b3ecb8f5b6ce2bcefb4fe92a64b99d2012cdd770..9ae6664ff08c768975ce452efa84c7cc9cdb0656 100644 (file)
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
+       case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
index 452d3ebd9d0fba3b513a6978b096bd22b27c1b46..e9f8fa9337fe1fae0d5d0ba9bf03a8426961a3c0 100644 (file)
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (bpfsize + sizeof(*header));
+       hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
index fde5abaac0ccbf6ee8c9d7f3e198d2b93e1fd384..1a49ffdf9da91056cb24357b6fdefea772658201 100644 (file)
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
index a364000ca1aa8a495f7e8b9bf59882350f41c6de..7f41d40b7e6e8ccf89b5ce12a9422bbf4e84ac2e 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                        size_t count)
 {
        unsigned long val, err;
-       int ret = sscanf(buf, "%ld", &val);
+       int ret = sscanf(buf, "%lu", &val);
 
        if (ret != 1)
                return -EINVAL;
index 2c20ad63ddbf2bbf8a4da5e751e49650d8be7060..30eee6e8a81b2d45797aab304914b10571573b1a 100644 (file)
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index a8ff0d1a3b6999ab7c0f16de972d192a48a951ff..4ced3fc66130c30b8870c21cea57611056046d50 100644 (file)
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
index f5d506fdddad3dea459aa97308e9a8872f34ffe8..fe19b81acc091b4d994da81f5580fe4438664388 100644 (file)
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index ce6ad7e6a7d7c7ba743884bbee2bcf77262f814d..33f71b01fd22e74d5e3d80b26ce4694689642064 100644 (file)
@@ -79,6 +79,7 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__ -m64
 
+        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
index abb9eba61b500192cd816dd9283fe8c8fb70b858..dbe8dd2fe247fb0632a79d5a20d1b4714729c47d 100644 (file)
@@ -71,7 +71,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
 
 quiet_cmd_voffset = VOFFSET $@
       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -80,7 +80,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
        $(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
index 17684615374b269f2d2eee0a826ab04d4eaf0f22..57ab74df7eeaa3eef4954b89f36d20b4c528afd7 100644 (file)
@@ -354,7 +354,7 @@ static void parse_elf(void *output)
        free(phdrs);
 }
 
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned char *input_data,
                                  unsigned long input_len,
                                  unsigned char *output,
index b18df579c0e99b09ff33261e692135338826de3a..36f7125945e3e241cdf2ac825124fd8d7883e0ba 100644 (file)
@@ -63,6 +63,7 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
index a8091216963b006145baa000e905edbdececae63..68c05398bba9b449a1324d54b584ce52d52aa8d1 100644 (file)
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
 {
+       ptep_clear_flush(vma, addr, ptep);
 }
 
 static inline int huge_pte_none(pte_t pte)
index 8de6d9cf3b954a7f31bcb756c92c20cc08db5759..678205195ae118e16ca34609a24f472d9875e568 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_SIZE_ORDER      1
+#define THREAD_SIZE_ORDER      2
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
index c827ace3121bc0f7ff3dc9d4cc74024207adc68c..fcf2b3ae1bf0208d9e6b153dc81c6b0ed60f8352 100644 (file)
 #define MSR_IA32_MISC_ENABLE_MWAIT_BIT                 18
 #define MSR_IA32_MISC_ENABLE_MWAIT                     (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT           22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT          23
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE              (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
 #define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT            34
index 3a2ae4c88948dcadb5b9afbfc2e10ef502202930..31368207837c2fbcd93f73d94ccf9c2d572d5cf5 100644 (file)
@@ -31,7 +31,7 @@ static char temp_stack[4096];
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
        return acpi_enter_sleep_state(state);
 }
index d23aa82e7a7bc25c702be004f804a0c9d02c15f7..992060e09897dd2b068f8d8ebf6f02fa15fa2845 100644 (file)
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
        cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
        unsigned vector, me;
 
index d921b7ee659525e7d040ff5ea5c6a569239a64a7..36a1bb6d1ee0d431752a170714aa6a43576ffada 100644 (file)
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
        smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
        entering_irq();
        __smp_thermal_interrupt();
        exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
index fe6b1c86645b668758331b78d93ab8c549ab09c2..7245980186eea047e643af5010e1509bfc991632 100644 (file)
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
        mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
        entering_irq();
        __smp_threshold_interrupt();
        exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
        entering_irq();
        trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
index aa333d9668866f808955209f8cd71737d447eafc..adb02aa62af5e310ff51ca720d1163b247489ccb 100644 (file)
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
index 384df5105fbc9883626ec5482151babd43ce482a..136ac74dee823005cea04ea9600a0c62cdf5685c 100644 (file)
@@ -27,6 +27,7 @@
 static int __init x86_rdrand_setup(char *s)
 {
        setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+       setup_clear_cpu_cap(X86_FEATURE_RDSEED);
        return 1;
 }
 __setup("nordrand", x86_rdrand_setup);
index 6e2537c3219060b31a9344c5df0dd90d8d3afbd2..6cda0baeac9d7810dadbfadc9b9cb2b228c68c5e 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/dma.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
@@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
        }
 }
 
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+       boot_hpet_disable = 1;
+       pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
          PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
          QFLAG_APPLY_ONCE, intel_graphics_stolen },
+       /*
+        * HPET on current version of Baytrail platform has accuracy
+        * problems, disable it for now:
+        */
+       { PCI_VENDOR_ID_INTEL, 0x0f00,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        {}
 };
 
index c61a14a4a3109f92ee63bb4df2ba470540c1d266..d6c1b983699576523aacb5b8066d48311eb94a67 100644 (file)
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
        reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
        sanitize_boot_params(&boot_params);
 
index 85126ccbdf6b1e957c231a73378cc1ffe78092e8..068054f4bf20b75e124d4776ee9ffa889a6e1a0c 100644 (file)
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
        }
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
        int i;
 
index 8d80ae0116039b6c71945737befc4f88dea097df..4177bfbc80b0d44400c563fa46affdbe79bc25d9 100644 (file)
@@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-static int boot_hpet_disable;
+int boot_hpet_disable;
 int hpet_force_user;
 static int hpet_verbose;
 
index af1d14a9ebdae1ac2fddace4c15271babd1613a3..dcbbaa165bdeed61dd2b504a13ca05ced99737c7 100644 (file)
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
         * IRET leaking the high bits of the kernel stack address.
         */
 #ifdef CONFIG_X86_64
-       if (!ldt_info.seg_32bit) {
+       if (!ldt_info.seg_32bit && !sysctl_ldt16) {
                error = -EINVAL;
                goto out_unlock;
        }
index 9c0280f93d05dbf9b644abd040b472b9c69d4f55..898d077617a99ab7c6ef055b06f409c8222a4249 100644 (file)
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
index 3399d3a997303322a9b5dc425080dd3553f98594..52b1157c53eb7f275b22d4a74def7daced321a80 100644 (file)
@@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
 
+       /* Certec */
+       {       /* Handle problems with rebooting on Certec BPC600 */
+               .callback = set_pci_reboot,
+               .ident = "Certec BPC600",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+               },
+       },
+
        /* Dell */
        {       /* Handle problems with rebooting on Dell DXP061 */
                .callback = set_bios_reboot,
index 7c3a5a61f2e46384c22abdf761afb4611d1efae6..be8e1bde07aa47ff373f0245e0f4b7d6d2edcfd5 100644 (file)
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
        ack_APIC_irq();
        irq_enter();
index 57409f6b8c623e38c1a60dfa1029bb9d194c99e6..f73b5d435bdca59ff7c12c157a36997773fc2e07 100644 (file)
@@ -357,7 +357,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
        struct pt_regs *regs = eregs;
        /* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
index 5edc34b5b9514df1f63af487a314ab4053d197a2..b99b9ad8540c525f79468d6fce0682681478c008 100644 (file)
@@ -36,7 +36,7 @@ static int irq_routing_comply = 1;
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
        unsigned long flags = native_save_fl();
 
@@ -56,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
        unsigned long flags = native_save_fl();
 
@@ -64,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
        unsigned long flags = native_save_fl();
 
index f9c6e56e14b5f31229b75721db9975dac5fe58c9..9531fbb123ba2223257f48dd724b59a45289389c 100644 (file)
@@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
        vdata->monotonic_time_sec       = tk->xtime_sec
                                        + tk->wall_to_monotonic.tv_sec;
        vdata->monotonic_time_snsec     = tk->xtime_nsec
-                                       + (tk->wall_to_monotonic.tv_nsec
+                                       + ((u64)tk->wall_to_monotonic.tv_nsec
                                                << tk->shift);
        while (vdata->monotonic_time_snsec >=
                                        (((u64)NSEC_PER_SEC) << tk->shift)) {
index 33e8c028842fb4b0b59bc269a973b195a104cdf8..138ceffc6377bba9e2c4fb5529ad379f81b0bec6 100644 (file)
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        exec_control = vmcs12->pin_based_vm_exec_control;
        exec_control |= vmcs_config.pin_based_exec_ctrl;
-       exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
+                          PIN_BASED_POSTED_INTR);
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
 
        vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                if (!vmx->rdtscp_enabled)
                        exec_control &= ~SECONDARY_EXEC_RDTSCP;
                /* Take the following fields only from vmcs12 */
-               exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+                                  SECONDARY_EXEC_APIC_REGISTER_VIRT);
                if (nested_cpu_has(vmcs12,
                                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                        exec_control |= vmcs12->secondary_vm_exec_control;
index 8b8fc0b792baeddf76a1b0ee3dac3a6a9b5cd0de..20316c67b824a9d06baa89cc0cf905eae3f24665 100644 (file)
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -280,7 +282,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
        BUG();
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
                                        &ka->master_kernel_ns,
                                        &ka->master_cycle_now);
 
-       ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+       ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+                               && !backwards_tsc_observed;
 
        if (ka->use_master_clock)
                atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
         */
        if (backwards_tsc) {
                u64 delta_cyc = max_tsc - local_tsc;
+               backwards_tsc_observed = true;
                list_for_each_entry(kvm, &vm_list, vm_list) {
                        kvm_for_each_vcpu(i, vcpu, kvm) {
                                vcpu->arch.tsc_offset_adjustment += delta_cyc;
index ad1fb5f53925e8634fac38da497128fed1904c55..aae94132bc24dd42b548d3dc18214b668195c06f 100644 (file)
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag.  Our "save_flags()" just returns that.
  */
-asmlinkage unsigned long lguest_save_fl(void)
+asmlinkage __visible unsigned long lguest_save_fl(void)
 {
        return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-asmlinkage void lguest_irq_disable(void)
+asmlinkage __visible void lguest_irq_disable(void)
 {
        lguest_data.irq_enabled = 0;
 }
index db9db446b71a66fe5bd59de47232e8fa69e8c96e..43623739c7cf315038f908d9623e601d8069c35b 100644 (file)
@@ -76,7 +76,7 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set)
        if (m1.q == m.q)
                return 0;
 
-       err = msr_write(msr, &m);
+       err = msr_write(msr, &m1);
        if (err)
                return err;
 
index a5449089cd9fef6e58a03174c5fe5a34cd48983a..9e6545f269e548e7cff7f4ee51876c440a7b6706 100644 (file)
@@ -302,7 +302,7 @@ static struct {
              0x242  in div_Xsig.S
  */
 
-asmlinkage void FPU_exception(int n)
+asmlinkage __visible void FPU_exception(int n)
 {
        int i, int_type;
 
@@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb,
 
 /* Invalid arith operation on Valid registers */
 /* Returns < 0 if the exception is unmasked */
-asmlinkage int arith_invalid(int deststnr)
+asmlinkage __visible int arith_invalid(int deststnr)
 {
 
        EXCEPTION(EX_Invalid);
@@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr)
 }
 
 /* Divide a finite number by zero */
-asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
 {
        FPU_REG *dest = &st(deststnr);
        int tag = TAG_Valid;
@@ -539,7 +539,7 @@ int set_precision_flag(int flags)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_up(void)
+asmlinkage __visible void set_precision_flag_up(void)
 {
        if (control_word & CW_Precision)
                partial_status |= (SW_Precision | SW_C1);       /* The masked response */
@@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_down(void)
+asmlinkage __visible void set_precision_flag_down(void)
 {
        if (control_word & CW_Precision) {      /* The masked response */
                partial_status &= ~SW_C1;
@@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void)
                EXCEPTION(EX_Precision);
 }
 
-asmlinkage int denormal_operand(void)
+asmlinkage __visible int denormal_operand(void)
 {
        if (control_word & CW_Denormal) {       /* The masked response */
                partial_status |= SW_Denorm_Op;
@@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void)
        }
 }
 
-asmlinkage int arith_overflow(FPU_REG *dest)
+asmlinkage __visible int arith_overflow(FPU_REG *dest)
 {
        int tag = TAG_Valid;
 
@@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest)
 
 }
 
-asmlinkage int arith_underflow(FPU_REG *dest)
+asmlinkage __visible int arith_underflow(FPU_REG *dest)
 {
        int tag = TAG_Valid;
 
index dc017735bb91b7b2ec61f333b091c63accdb921b..6d5663a599a7a362756b5bcb6d3ff3e30ee34bae 100644 (file)
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
index 81b506d5befd46e5a494d6cbef7aa71edceca7f1..524142117296898237466e9630da1dadd8bbe1fd 100644 (file)
 
 static const struct font_desc *font;
 static u32 efi_x, efi_y;
+static void *efi_fb;
+static bool early_efi_keep;
 
-static __init void early_efi_clear_scanline(unsigned int y)
+/*
+ * efi earlyprintk need use early_ioremap to map the framebuffer.
+ * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should
+ * be used instead. ioremap will be available after paging_init() which is
+ * earlier than initcall callbacks. Thus adding this early initcall function
+ * early_efi_map_fb to map the whole efi framebuffer.
+ */
+static __init int early_efi_map_fb(void)
 {
-       unsigned long base, *dst;
-       u16 len;
+       unsigned long base, size;
+
+       if (!early_efi_keep)
+               return 0;
 
        base = boot_params.screen_info.lfb_base;
-       len = boot_params.screen_info.lfb_linelength;
+       size = boot_params.screen_info.lfb_size;
+       efi_fb = ioremap(base, size);
+
+       return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(early_efi_map_fb);
+
+/*
+ * early_efi_map maps efi framebuffer region [start, start + len -1]
+ * In case earlyprintk=efi,keep we have the whole framebuffer mapped already
+ * so just return the offset efi_fb + start.
+ */
+static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+{
+       unsigned long base;
+
+       base = boot_params.screen_info.lfb_base;
+
+       if (efi_fb)
+               return (efi_fb + start);
+       else
+               return early_ioremap(base + start, len);
+}
 
-       dst = early_ioremap(base + y*len, len);
+static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+{
+       if (!efi_fb)
+               early_iounmap(addr, len);
+}
+
+static void early_efi_clear_scanline(unsigned int y)
+{
+       unsigned long *dst;
+       u16 len;
+
+       len = boot_params.screen_info.lfb_linelength;
+       dst = early_efi_map(y*len, len);
        if (!dst)
                return;
 
        memset(dst, 0, len);
-       early_iounmap(dst, len);
+       early_efi_unmap(dst, len);
 }
 
-static __init void early_efi_scroll_up(void)
+static void early_efi_scroll_up(void)
 {
-       unsigned long base, *dst, *src;
+       unsigned long *dst, *src;
        u16 len;
        u32 i, height;
 
-       base = boot_params.screen_info.lfb_base;
        len = boot_params.screen_info.lfb_linelength;
        height = boot_params.screen_info.lfb_height;
 
        for (i = 0; i < height - font->height; i++) {
-               dst = early_ioremap(base + i*len, len);
+               dst = early_efi_map(i*len, len);
                if (!dst)
                        return;
 
-               src = early_ioremap(base + (i + font->height) * len, len);
+               src = early_efi_map((i + font->height) * len, len);
                if (!src) {
-                       early_iounmap(dst, len);
+                       early_efi_unmap(dst, len);
                        return;
                }
 
                memmove(dst, src, len);
 
-               early_iounmap(src, len);
-               early_iounmap(dst, len);
+               early_efi_unmap(src, len);
+               early_efi_unmap(dst, len);
        }
 }
 
@@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
        }
 }
 
-static __init void
+static void
 early_efi_write(struct console *con, const char *str, unsigned int num)
 {
        struct screen_info *si;
-       unsigned long base;
        unsigned int len;
        const char *s;
        void *dst;
 
-       base = boot_params.screen_info.lfb_base;
        si = &boot_params.screen_info;
        len = si->lfb_linelength;
 
@@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                for (h = 0; h < font->height; h++) {
                        unsigned int n, x;
 
-                       dst = early_ioremap(base + (efi_y + h) * len, len);
+                       dst = early_efi_map((efi_y + h) * len, len);
                        if (!dst)
                                return;
 
@@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                                s++;
                        }
 
-                       early_iounmap(dst, len);
+                       early_efi_unmap(dst, len);
                }
 
                num -= count;
@@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options)
        for (i = 0; i < (yres - efi_y) / font->height; i++)
                early_efi_scroll_up();
 
+       /* early_console_register will unset CON_BOOT in case ,keep */
+       if (!(con->flags & CON_BOOT))
+               early_efi_keep = true;
        return 0;
 }
 
index ff0174dda810fd026bfde830747d52f48ffbdb0d..a9acde72d4ed9844b62c0ce87a5d441f21004ced 100644 (file)
@@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
        return 0;
 }
 
-asmlinkage int xo1_do_sleep(u8 sleep_state)
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
 {
        void *pgd_addr = __va(read_cr3());
 
index 304fca20d96ee3e1540d7045a9e0e25118c240b0..35e2bb6c0f372d1ff6f91a6078efa339b25efd78 100644 (file)
@@ -23,7 +23,7 @@
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Defined in hibernate_asm_64.S */
-extern asmlinkage int restore_image(void);
+extern asmlinkage __visible int restore_image(void);
 
 /*
  * Address to jump to in the last phase of restore in order to get to the image
index 00348980a3a64a49180be23bda3517d314c6bf81..e1f220e3ca6899af1d542ce7d22903e961754c7c 100644 (file)
@@ -39,6 +39,7 @@
 #ifdef CONFIG_X86_64
 #define vdso_enabled                   sysctl_vsyscall32
 #define arch_setup_additional_pages    syscall32_setup_pages
+extern int sysctl_ldt16;
 #endif
 
 /*
@@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ldt16",
+               .data           = &sysctl_ldt16,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {}
 };
 
index 201d09a7c46bbae56a21d15e56e222d67163f5ff..c34bfc4bbe7faeaa9fac33d23331688ecdd85078 100644 (file)
@@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void)
 }
 
 /* First C function to be called on Xen boot */
-asmlinkage void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(void)
 {
        struct physdev_set_iopl set_iopl;
        int rc;
index 08f763de26fe4132d7e6dcf0a7b50a660af76319..a1207cb6472a90ce9e57deeb1bd63eb8ebd74c4d 100644 (file)
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
        (void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
 {
        struct vcpu_info *vcpu;
        unsigned long flags;
@@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-asmlinkage void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
 {
        /* There's a one instruction preempt window here.  We need to
           make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
index e4a4145926f629787ce0647f3036a98298c8f055..1039fb9ff5f5f998628884dedab422c9b405a36c 100644 (file)
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
        struct blkcg_gq *blkg;
        int i;
 
-       mutex_lock(&blkcg_pol_mutex);
+       /*
+        * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+        * which ends up putting cgroup's internal cgroup_tree_mutex under
+        * it; however, cgroup_tree_mutex is nested above cgroup file
+        * active protection and grabbing blkcg_pol_mutex from a cgroup
+        * file operation creates a possible circular dependency.  cgroup
+        * internal locking is planned to go through further simplification
+        * and this issue should go away soon.  For now, let's trylock
+        * blkcg_pol_mutex and restart the write on failure.
+        *
+        * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
+        */
+       if (!mutex_trylock(&blkcg_pol_mutex))
+               return restart_syscall();
        spin_lock_irq(&blkcg->lock);
 
        /*
index d05d81b19b50c229b2aa100bf01f8ac2818c0c1c..7183b6af5dac2b72fc53edc7044d743eac57ed45 100644 (file)
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN)                += sn/
 obj-y                          += firmware/
 obj-$(CONFIG_CRYPTO)           += crypto/
 obj-$(CONFIG_SUPERH)           += sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += sh/
+obj-$(CONFIG_ARCH_SHMOBILE)    += sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y                          += clocksource/
 endif
index ab686b31010034083ba3b4784ba288fb93b3f554..a34a22841002495713a74f960dea482ecee8dc11 100644 (file)
@@ -47,6 +47,23 @@ config ACPI_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
+config ACPI_PROCFS_POWER
+       bool "Deprecated power /proc/acpi directories"
+       depends on PROC_FS
+       help
+         For backwards compatibility, this option allows
+          deprecated power /proc/acpi/ directories to exist, even when
+          they have been replaced by functions in /sys.
+          The deprecated directories (and their replacements) include:
+         /proc/acpi/battery/* (/sys/class/power_supply/*)
+         /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
+         This option has no effect on /proc/acpi/ directories
+         and functions, which do not yet exist in /sys
+         This option, together with the proc directories, will be
+         deleted in the future.
+
+         Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
index 0331f91d56e663d63a7268d1a77bb3ae1080f715..bce34afadcd05d250831f098ca533565750345e6 100644 (file)
@@ -47,6 +47,7 @@ acpi-y                                += sysfs.o
 acpi-$(CONFIG_X86)             += acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)                += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
+acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
index 2c01c1da29ce39f637136a12a512f2a473333233..c67f6f5ad61107b7ded8069a53894763e91ab892 100644 (file)
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id ac_device_ids[] = {
+       {"ACPI0003", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
 static int ac_sleep_before_get_state_ms;
 
+static struct acpi_driver acpi_ac_driver = {
+       .name = "ac",
+       .class = ACPI_AC_CLASS,
+       .ids = ac_device_ids,
+       .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+       .ops = {
+               .add = acpi_ac_add,
+               .remove = acpi_ac_remove,
+               .notify = acpi_ac_notify,
+               },
+       .drv.pm = &acpi_ac_pm,
+};
+
 struct acpi_ac {
        struct power_supply charger;
-       struct platform_device *pdev;
+       struct acpi_device * device;
        unsigned long long state;
        struct notifier_block battery_nb;
 };
@@ -69,10 +97,12 @@ struct acpi_ac {
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-       acpi_status status;
-       acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
+       acpi_status status = AE_OK;
+
+       if (!ac)
+               return -EINVAL;
 
-       status = acpi_evaluate_integer(handle, "_PSR", NULL,
+       status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
                                       &ac->state);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-       struct acpi_ac *ac = data;
-       struct acpi_device *adev;
+       struct acpi_ac *ac = acpi_driver_data(device);
 
        if (!ac)
                return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               adev = ACPI_COMPANION(&ac->pdev->dev);
-               acpi_bus_generate_netlink_event(adev->pnp.device_class,
-                                               dev_name(&ac->pdev->dev),
-                                               event, (u32) ac->state);
-               acpi_notifier_call_chain(adev, event, (u32) ac->state);
+               acpi_bus_generate_netlink_event(device->pnp.device_class,
+                                                 dev_name(&device->dev), event,
+                                                 (u32) ac->state);
+               acpi_notifier_call_chain(device, event, (u32) ac->state);
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
        }
 
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
        {},
 };
 
-static int acpi_ac_probe(struct platform_device *pdev)
+static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
        struct acpi_ac *ac = NULL;
-       struct acpi_device *adev;
 
-       if (!pdev)
-               return -EINVAL;
 
-       adev = ACPI_COMPANION(&pdev->dev);
-       if (!adev)
-               return -ENODEV;
+       if (!device)
+               return -EINVAL;
 
        ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;
 
-       strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
-       strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
-       ac->pdev = pdev;
-       platform_set_drvdata(pdev, ac);
+       ac->device = device;
+       strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+       strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+       device->driver_data = ac;
 
        result = acpi_ac_get_state(ac);
        if (result)
                goto end;
 
-       ac->charger.name = acpi_device_bid(adev);
+       ac->charger.name = acpi_device_bid(device);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       result = power_supply_register(&pdev->dev, &ac->charger);
+       result = power_supply_register(&ac->device->dev, &ac->charger);
        if (result)
                goto end;
 
-       result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
-       if (result) {
-               power_supply_unregister(&ac->charger);
-               goto end;
-       }
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-              acpi_device_name(adev), acpi_device_bid(adev),
+              acpi_device_name(device), acpi_device_bid(device),
               ac->state ? "on-line" : "off-line");
 
        ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
        if (!dev)
                return -EINVAL;
 
-       ac = platform_get_drvdata(to_platform_device(dev));
+       ac = acpi_driver_data(to_acpi_device(dev));
        if (!ac)
                return -EINVAL;
 
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
 #else
 #define acpi_ac_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct platform_device *pdev)
+static int acpi_ac_remove(struct acpi_device *device)
 {
-       struct acpi_ac *ac;
+       struct acpi_ac *ac = NULL;
+
 
-       if (!pdev)
+       if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
-       acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
+       ac = acpi_driver_data(device);
 
-       ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
        unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct acpi_device_id acpi_ac_match[] = {
-       { "ACPI0003", 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
-
-static struct platform_driver acpi_ac_driver = {
-       .probe          = acpi_ac_probe,
-       .remove         = acpi_ac_remove,
-       .driver         = {
-               .name   = "acpi-ac",
-               .owner  = THIS_MODULE,
-               .pm     = &acpi_ac_pm_ops,
-               .acpi_match_table = ACPI_PTR(acpi_ac_match),
-       },
-};
-
 static int __init acpi_ac_init(void)
 {
        int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-       result = platform_driver_register(&acpi_ac_driver);
+       result = acpi_bus_register_driver(&acpi_ac_driver);
        if (result < 0)
                return -ENODEV;
 
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
 
 static void __exit acpi_ac_exit(void)
 {
-       platform_driver_unregister(&acpi_ac_driver);
+       acpi_bus_unregister_driver(&acpi_ac_driver);
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
index dbfe49e5fd63cc179559b2c5caee57d27324c012..1d4950388fa13b9fc6050fbaf419ae68c796f24d 100644 (file)
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
        { "PNP0D40" },
-       { "ACPI0003" },
        { "VPC2004" },
        { "BCM4752" },
 
index b06f5f55ada952ced85de9c845dfb49cac421633..52c81c49cc7d8396fa5b36a4d24b757f934ced46 100644 (file)
@@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
                goto err;
 
        pr->dev = dev;
-       dev->offline = pr->flags.need_hotplug_init;
 
        /* Trigger the processor driver's .probe() if present. */
        if (device_attach(dev) >= 0)
index 49bbc71fad54efd709ba9b7e6a610b5f8a30a7a3..a08a448068dd99981be3eeb9643e0255b20ae8df 100644 (file)
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is FALSE, do not favor the 32-bit addresses.
+ * address. Default is TRUE, favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
index a4702eee91a820d131960754c14da6aa62f50939..9fb85f38de90e3b073635ef2b854895b13e7b5b8 100644 (file)
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        u32 table_count;
        struct acpi_table_header *table;
        acpi_physical_address address;
+       acpi_physical_address rsdt_address;
        u32 length;
        u8 *table_entry;
        acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                 * as per the ACPI specification.
                 */
                address = (acpi_physical_address) rsdp->xsdt_physical_address;
+               rsdt_address =
+                   (acpi_physical_address) rsdp->rsdt_physical_address;
                table_entry_size = ACPI_XSDT_ENTRY_SIZE;
        } else {
                /* Root table is an RSDT (32-bit physical addresses) */
 
                address = (acpi_physical_address) rsdp->rsdt_physical_address;
+               rsdt_address = address;
                table_entry_size = ACPI_RSDT_ENTRY_SIZE;
        }
 
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 
                        /* Fall back to the RSDT */
 
-                       address =
-                           (acpi_physical_address) rsdp->rsdt_physical_address;
+                       address = rsdt_address;
                        table_entry_size = ACPI_RSDT_ENTRY_SIZE;
                }
        }
index 9a2c63b2005038476e5a5e77360fd836aad3f25f..6e7b2a12860d31ac533c730e97d080daaa6f1385 100644 (file)
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+       info_tag = 0,
+       state_tag,
+       alarm_tag,
+       ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
 static const struct acpi_device_id battery_device_ids[] = {
        {"PNP0C0A", 0},
        {"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+       return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+               "mA" : "mW";
+}
+#endif
+
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -716,6 +743,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
        sysfs_add_battery(battery);
 }
 
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+       if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design capacity:         unknown\n");
+       else
+               seq_printf(seq, "design capacity:         %d %sh\n",
+                          battery->design_capacity,
+                          acpi_battery_units(battery));
+
+       if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "last full capacity:      unknown\n");
+       else
+               seq_printf(seq, "last full capacity:      %d %sh\n",
+                          battery->full_charge_capacity,
+                          acpi_battery_units(battery));
+
+       seq_printf(seq, "battery technology:      %srechargeable\n",
+                  (!battery->technology)?"non-":"");
+
+       if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design voltage:          unknown\n");
+       else
+               seq_printf(seq, "design voltage:          %d mV\n",
+                          battery->design_voltage);
+       seq_printf(seq, "design capacity warning: %d %sh\n",
+                  battery->design_capacity_warning,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "design capacity low:     %d %sh\n",
+                  battery->design_capacity_low,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
+       seq_printf(seq, "capacity granularity 1:  %d %sh\n",
+                  battery->capacity_granularity_1,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "capacity granularity 2:  %d %sh\n",
+                  battery->capacity_granularity_2,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "model number:            %s\n", battery->model_number);
+       seq_printf(seq, "serial number:           %s\n", battery->serial_number);
+       seq_printf(seq, "battery type:            %s\n", battery->type);
+       seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery info\n");
+       return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+
+       seq_printf(seq, "capacity state:          %s\n",
+                       (battery->state & 0x04) ? "critical" : "ok");
+       if ((battery->state & 0x01) && (battery->state & 0x02))
+               seq_printf(seq,
+                          "charging state:          charging/discharging\n");
+       else if (battery->state & 0x01)
+               seq_printf(seq, "charging state:          discharging\n");
+       else if (battery->state & 0x02)
+               seq_printf(seq, "charging state:          charging\n");
+       else
+               seq_printf(seq, "charging state:          charged\n");
+
+       if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present rate:            unknown\n");
+       else
+               seq_printf(seq, "present rate:            %d %s\n",
+                          battery->rate_now, acpi_battery_units(battery));
+
+       if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "remaining capacity:      unknown\n");
+       else
+               seq_printf(seq, "remaining capacity:      %d %sh\n",
+                          battery->capacity_now, acpi_battery_units(battery));
+       if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present voltage:         unknown\n");
+       else
+               seq_printf(seq, "present voltage:         %d mV\n",
+                          battery->voltage_now);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+       return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       if (!acpi_battery_present(battery)) {
+               seq_printf(seq, "present:                 no\n");
+               goto end;
+       }
+       seq_printf(seq, "alarm:                   ");
+       if (!battery->alarm)
+               seq_printf(seq, "unsupported\n");
+       else
+               seq_printf(seq, "%u %sh\n", battery->alarm,
+                               acpi_battery_units(battery));
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+       return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+                                       const char __user * buffer,
+                                       size_t count, loff_t * ppos)
+{
+       int result = 0;
+       char alarm_string[12] = { '\0' };
+       struct seq_file *m = file->private_data;
+       struct acpi_battery *battery = m->private;
+
+       if (!battery || (count > sizeof(alarm_string) - 1))
+               return -EINVAL;
+       if (!acpi_battery_present(battery)) {
+               result = -ENODEV;
+               goto end;
+       }
+       if (copy_from_user(alarm_string, buffer, count)) {
+               result = -EFAULT;
+               goto end;
+       }
+       alarm_string[count] = '\0';
+       battery->alarm = simple_strtol(alarm_string, NULL, 0);
+       result = acpi_battery_set_alarm(battery);
+      end:
+       if (!result)
+               return count;
+       return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+       acpi_battery_print_info,
+       acpi_battery_print_state,
+       acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+       struct acpi_battery *battery = seq->private;
+       int result = acpi_battery_update(battery);
+       return acpi_print_funcs[fid](seq, result);
+}
+
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+       return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IRUGO, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+#define FILE_DESCRIPTION_RW(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IFREG | S_IRUGO | S_IWUSR, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .write = acpi_battery_write_##_name, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+static const struct battery_file {
+       struct file_operations ops;
+       umode_t mode;
+       const char *name;
+} acpi_battery_file[] = {
+       FILE_DESCRIPTION_RO(info),
+       FILE_DESCRIPTION_RO(state),
+       FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+       struct proc_dir_entry *entry = NULL;
+       int i;
+
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+       if (!acpi_device_dir(device)) {
+               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+                                                    acpi_battery_dir);
+               if (!acpi_device_dir(device))
+                       return -ENODEV;
+       }
+
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+               entry = proc_create_data(acpi_battery_file[i].name,
+                                        acpi_battery_file[i].mode,
+                                        acpi_device_dir(device),
+                                        &acpi_battery_file[i].ops,
+                                        acpi_driver_data(device));
+               if (!entry)
+                       return -ENODEV;
+       }
+       return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+       int i;
+       if (!acpi_device_dir(device))
+               return;
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+               remove_proc_entry(acpi_battery_file[i].name,
+                                 acpi_device_dir(device));
+
+       remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+       acpi_device_dir(device) = NULL;
+}
+
+#endif
+
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
        result = acpi_battery_update(battery);
        if (result)
                goto fail;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       result = acpi_battery_add_fs(device);
+#endif
+       if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_battery_remove_fs(device);
+#endif
+               goto fail;
+       }
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
                return -EINVAL;
        battery = acpi_driver_data(device);
        unregister_pm_notifier(&battery->pm_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_remove_fs(device);
+#endif
        sysfs_remove_battery(battery);
        mutex_destroy(&battery->lock);
        mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 
        if (dmi_check_system(bat_dmi_table))
                battery_bix_broken_package = 1;
-       acpi_bus_register_driver(&acpi_battery_driver);
+       
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_dir = acpi_lock_battery_dir();
+       if (!acpi_battery_dir)
+               return;
+#endif
+       if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+               return;
+       }
+       return;
 }
 
 static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
 }
 
 module_init(acpi_battery_init);
index afec4526c48aa04e2921a199396e8fb2ea7489be..3d8413d02a975f0643275a247524a0c9d7569341 100644 (file)
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Inspiron 7737",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
                },
        },
+       /*
+        * Without this this EEEpc exports a non working WMI interface, with
+        * this it exports a working "good old" eeepc_laptop interface, fixing
+        * both brightness control, and rfkill not working.
+        */
+       {
+       .callback = dmi_enable_osi_linux,
+       .ident = "Asus EEE PC 1015PX",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+               },
+       },
        {}
 };
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644 (file)
index 0000000..6c9ee68
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS          "ac_adapter"
+#define ACPI_BATTERY_CLASS     "battery"
+#define _COMPONENT             ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_ac_dir)
+               acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+       if (acpi_ac_dir) {
+               lock_ac_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_AC_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_ac_dir_param)
+               lock_ac_dir_cnt--;
+       if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+               remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+               acpi_ac_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_battery_dir) {
+               acpi_battery_dir =
+                   proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+       }
+       if (acpi_battery_dir) {
+               lock_battery_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_BATTERY_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_battery_dir_param)
+               lock_battery_dir_cnt--;
+       if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+           && acpi_battery_dir) {
+               remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+               acpi_battery_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
index c1e31a41f94957d87ded390dfbfcc54333728e3c..25bbc55dca896889886a1e9ed856cfb6fa71135d 100644 (file)
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
 
 static void __exit acpi_thermal_exit(void)
 {
-       destroy_workqueue(acpi_thermal_pm_queue);
        acpi_bus_unregister_driver(&acpi_thermal_driver);
+       destroy_workqueue(acpi_thermal_pm_queue);
 
        return;
 }
index 8b6990e417ec870d7c77e42c994b1e2245b3b88e..f8bc5a755dda411963e097cc41c27bec0752da71 100644 (file)
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
         .callback = video_set_use_native_backlight,
-        .ident = "ThinkPad T430s",
+        .ident = "ThinkPad T430 and T430s",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
                },
        },
        {
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                },
        },
        {
-       .callback = video_set_use_native_backlight,
+        .callback = video_set_use_native_backlight,
        .ident = "ThinkPad X1 Carbon",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
         .ident = "Dell Inspiron 7520",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
                },
        },
        {
@@ -511,6 +511,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer Aspire 5742G",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
+               },
+       },
        {
         .callback = video_set_use_native_backlight,
         .ident = "Acer Aspire V5-431",
index c2706047337f17c0fad38b3161cabc93d95be0e5..0033fafc470be5f7be9b84da74d689143abb5357 100644 (file)
@@ -815,7 +815,7 @@ config PATA_AT32
 
 config PATA_AT91
        tristate "PATA support for AT91SAM9260"
-       depends on ARM && ARCH_AT91
+       depends on ARM && SOC_AT91SAM9
        help
          This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
 
index 71e15b73513d22ed2bf5ac34afec9b5f42679fe7..60707814a84b19e2581d6309c2a17d4823d1e015 100644 (file)
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
        return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
 }
 
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+       /* device with broken DEVSLP but still showing SDS capability */
+       static const struct pci_device_id ids[] = {
+               { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
+               {}
+       };
+
+       return pci_match_id(ids, pdev);
+}
+
 #ifdef CONFIG_ATA_ACPI
 static void ahci_gtf_filter_workaround(struct ata_host *host)
 {
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+       /* must set flag prior to save config in order to take effect */
+       if (ahci_broken_devslp(pdev))
+               hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
index b5eb886da22635c3c76775bc0ef6374af3464b98..af63c75c20011e10d76be66edb27d595cb47978c 100644 (file)
@@ -236,6 +236,7 @@ enum {
                                                        port start (wait until
                                                        error-handling stage) */
        AHCI_HFLAG_MULTI_MSI            = (1 << 16), /* multiple PCI MSIs */
+       AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
 
        /* ap->flags bits */
 
index 497c7abe1c7df5ef79ccd246828b68a1251c5201..8befeb69eeb1133afc62daa43b1fb41af584994e 100644 (file)
 #include "ahci.h"
 
 enum {
-       PORT_PHY_CTL = 0x178,                   /* Port0 PHY Control */
-       PORT_PHY_CTL_PDDQ_LOC = 0x100000,       /* PORT_PHY_CTL bits */
-       HOST_TIMER1MS = 0xe0,                   /* Timer 1-ms */
+       /* Timer 1-ms Register */
+       IMX_TIMER1MS                            = 0x00e0,
+       /* Port0 PHY Control Register */
+       IMX_P0PHYCR                             = 0x0178,
+       IMX_P0PHYCR_TEST_PDDQ                   = 1 << 20,
+       IMX_P0PHYCR_CR_READ                     = 1 << 19,
+       IMX_P0PHYCR_CR_WRITE                    = 1 << 18,
+       IMX_P0PHYCR_CR_CAP_DATA                 = 1 << 17,
+       IMX_P0PHYCR_CR_CAP_ADDR                 = 1 << 16,
+       /* Port0 PHY Status Register */
+       IMX_P0PHYSR                             = 0x017c,
+       IMX_P0PHYSR_CR_ACK                      = 1 << 18,
+       IMX_P0PHYSR_CR_DATA_OUT                 = 0xffff << 0,
+       /* Lane0 Output Status Register */
+       IMX_LANE0_OUT_STAT                      = 0x2003,
+       IMX_LANE0_OUT_STAT_RX_PLL_STATE         = 1 << 1,
+       /* Clock Reset Register */
+       IMX_CLOCK_RESET                         = 0x7f3f,
+       IMX_CLOCK_RESET_RESET                   = 1 << 0,
 };
 
 enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
 
 static void ahci_imx_host_stop(struct ata_host *host);
 
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+       int timeout = 10;
+       u32 crval;
+       u32 srval;
+
+       /* Assert or deassert the bit */
+       crval = readl(mmio + IMX_P0PHYCR);
+       if (assert)
+               crval |= bit;
+       else
+               crval &= ~bit;
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Wait for the cr_ack signal */
+       do {
+               srval = readl(mmio + IMX_P0PHYSR);
+               if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+                       break;
+               usleep_range(100, 200);
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+       u32 crval = addr;
+       int ret;
+
+       /* Supply the address on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_addr signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_addr */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+       u32 crval = val;
+       int ret;
+
+       /* Supply the data on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_data signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_data */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+       if (ret)
+               return ret;
+
+       if (val & IMX_CLOCK_RESET_RESET) {
+               /*
+                * In case we're resetting the phy, it's unable to acknowledge,
+                * so we return immediately here.
+                */
+               crval |= IMX_P0PHYCR_CR_WRITE;
+               writel(crval, mmio + IMX_P0PHYCR);
+               goto out;
+       }
+
+       /* Assert the cr_write signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_write */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+       if (ret)
+               return ret;
+
+out:
+       return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+       int ret;
+
+       /* Assert the cr_read signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+       if (ret)
+               return ret;
+
+       /* Capture the data from cr_data_out[] */
+       *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+       /* Deassert cr_read */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+       void __iomem *mmio = hpriv->mmio;
+       int timeout = 10;
+       u16 val;
+       int ret;
+
+       /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+       ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+       if (ret)
+               return ret;
+       ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+       if (ret)
+               return ret;
+
+       /* Wait for PHY RX_PLL to be stable */
+       do {
+               usleep_range(100, 200);
+               ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+               if (ret)
+                       return ret;
+               ret = imx_phy_reg_read(&val, mmio);
+               if (ret)
+                       return ret;
+               if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+                       break;
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
 static int imx_sata_enable(struct ahci_host_priv *hpriv)
 {
        struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+       struct device *dev = &imxpriv->ahci_pdev->dev;
        int ret;
 
        if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+               usleep_range(100, 200);
+
+               ret = imx_sata_phy_reset(hpriv);
+               if (ret) {
+                       dev_err(dev, "failed to reset phy: %d\n", ret);
+                       goto disable_regulator;
+               }
        }
 
        usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
         * without full reset once the pddq mode is enabled making it
         * impossible to use as part of libata LPM.
         */
-       reg_val = readl(mmio + PORT_PHY_CTL);
-       writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+       reg_val = readl(mmio + IMX_P0PHYCR);
+       writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
        imx_sata_disable(hpriv);
        imxpriv->no_device = true;
 }
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        if (!imxpriv)
                return -ENOMEM;
 
+       imxpriv->ahci_pdev = pdev;
        imxpriv->no_device = false;
        imxpriv->first_time = true;
        imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
        /*
         * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
-        * and IP vendor specific register HOST_TIMER1MS.
+        * and IP vendor specific register IMX_TIMER1MS.
         * Configure CAP_SSS (support stagered spin up).
         * Implement the port0.
         * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        }
 
        reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
-       writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
+       writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
        if (ret)
index 6bd4f660b4e15966ca2c351b4501c0521491de32..b9861453fc8148612a740418f5fee3088d7b65a1 100644 (file)
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_SNTF;
        }
 
+       if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+               dev_info(dev,
+                        "controller can't do DEVSLP, turning off\n");
+               cap2 &= ~HOST_CAP2_SDS;
+               cap2 &= ~HOST_CAP2_SADM;
+       }
+
        if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
                dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
                cap |= HOST_CAP_FBS;
index 943cc8b83e59bb7f1b293abce887be717047ffff..ea83828bfea94b41eae6947cfbbbaa8dfbc15b5a 100644 (file)
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
 static void ata_port_detach(struct ata_port *ap)
 {
        unsigned long flags;
+       struct ata_link *link;
+       struct ata_device *dev;
 
        if (!ap->ops->error_handler)
                goto skip_eh;
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
        cancel_delayed_work_sync(&ap->hotplug_task);
 
  skip_eh:
+       /* clean up zpodd on port removal */
+       ata_for_each_link(link, ap, HOST_FIRST) {
+               ata_for_each_dev(dev, link, ALL) {
+                       if (zpodd_dev_enabled(dev))
+                               zpodd_exit(dev);
+               }
+       }
        if (ap->pmp_link) {
                int i;
                for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
index 6d8a87f252de563f1251872e5bccbd25e2e64cf7..cb9b1f8326c3c6e1327b2c4505bc94aa143cf215 100644 (file)
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
-       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
        err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vq);
-               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                blk_mq_stop_hw_queue(hctx);
+               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
index 293e2e0a0a87c7d9877c27524fd98503ba16c1cf..00b73448b22ea7b77a55785cfbbddf022bf4bfd3 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
+#include <linux/log2.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
                 */
                if ((u64)base < wend && end > wbase)
                        return 0;
-
-               /*
-                * Check if target/attribute conflicts
-                */
-               if (target == wtarget && attr == wattr)
-                       return 0;
        }
 
        return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
                mbus->soc->win_cfg_offset(win);
        u32 ctrl, remap_addr;
 
+       if (!is_power_of_2(size)) {
+               WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+               return -EINVAL;
+       }
+
+       if ((base & (phys_addr_t)(size - 1)) != 0) {
+               WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+                    size);
+               return -EINVAL;
+       }
+
        ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
                (attr << WIN_CTRL_ATTR_SHIFT)    |
                (target << WIN_CTRL_TGT_SHIFT)   |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
                           win, (unsigned long long)wbase,
                           (unsigned long long)(wbase + wsize), wtarget, wattr);
 
+               if (!is_power_of_2(wsize) ||
+                   ((wbase & (u64)(wsize - 1)) != 0))
+                       seq_puts(seq, " (Invalid base/size!!)");
+
                if (win < mbus->soc->num_remappable_wins) {
                        seq_printf(seq, " (remap %016llx)\n",
                                   (unsigned long long)wremap);
index 6b75713d953a4e719cd33610f6034bec430c5d95..102c50d38902ca43fed85641618ba202a7251679 100644 (file)
@@ -995,8 +995,11 @@ retry:
                ibytes = min_t(size_t, ibytes, have_bytes - reserved);
        if (ibytes < min)
                ibytes = 0;
-       entropy_count = max_t(int, 0,
-                             entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+       if (have_bytes >= ibytes + reserved)
+               entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+       else
+               entropy_count = reserved << (ENTROPY_SHIFT + 3);
+
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
 
index b3ea223585bdeac64de19606f96b21b280c96210..61dcc8011ec711246727ce5c2750277f33c219f4 100644 (file)
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
        /* Cache TPM ACPI handle and version string */
        acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
                            ppi_callback, NULL, NULL, &tpm_ppi_handle);
-       if (tpm_ppi_handle == NULL)
-               return -ENODEV;
-
-       return sysfs_create_group(parent, &ppi_attr_grp);
+       return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
 }
 
 void tpm_remove_ppi(struct kobject *parent)
 {
-       sysfs_remove_group(parent, &ppi_attr_grp);
+       if (tpm_ppi_handle)
+               sysfs_remove_group(parent, &ppi_attr_grp);
 }
index c7607feb18dd159b7626fd48340c764f50738c10..54a06526f64f09d7a49b8e043bd91d2e3d2dfffd 100644 (file)
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list);  /* The list of set up CCUs */
 
 static bool clk_requires_trigger(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_sel *sel;
        struct bcm_clk_div *div;
 
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
        u32 limit;
 
        BUG_ON(bcm_clk->type != bcm_clk_peri);
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        range = bcm_clk->ccu->range;
 
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
 
        div = &peri->div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad divider offset for %s (%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
 
        div = &peri->pre_div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad pre-divider offset for %s "
                                        "(%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
 {
        if (divider_is_fixed(div)) {
                /* Any fixed divider value but 0 is OK */
-               if (div->fixed == 0) {
+               if (div->u.fixed == 0) {
                        pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
                                field_name, clock_name);
                        return false;
                }
                return true;
        }
-       if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+       if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+                               field_name, clock_name))
                return false;
 
        if (divider_has_fraction(div))
-               if (div->frac_width > div->width) {
+               if (div->u.s.frac_width > div->u.s.width) {
                        pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
                                __func__, field_name, clock_name,
-                               div->frac_width, div->width);
+                               div->u.s.frac_width, div->u.s.width);
                        return false;
                }
 
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
  */
 static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_div *div;
        struct bcm_clk_div *pre_div;
        u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 
        limit = BITS_PER_BYTE * sizeof(u32);
 
-       return div->frac_width + pre_div->frac_width <= limit;
+       return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
 }
 
 
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
        if (!peri_clk_data_offsets_valid(bcm_clk))
                return false;
 
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        gate = &peri->gate;
        if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
 {
        switch (bcm_clk->type) {
        case bcm_clk_peri:
-               peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+               peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
                break;
        default:
                break;
        }
-       bcm_clk->data = NULL;
+       bcm_clk->u.data = NULL;
        bcm_clk->type = bcm_clk_none;
 }
 
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
                break;
        }
        bcm_clk->type = type;
-       bcm_clk->data = data;
+       bcm_clk->u.data = data;
 
        /* Make sure everything makes sense before we set it up */
        if (!kona_clk_valid(bcm_clk)) {
index e3d339e08309f66ac61d2286c7c28bca9c4cec76..db11a87449f236c6bc1fcf4e5f6084750a1d4f3a 100644 (file)
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
 /* Convert a divider into the scaled divisor value it represents. */
 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
 {
-       return (u64)reg_div + ((u64)1 << div->frac_width);
+       return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
 }
 
 /*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
        BUG_ON(billionths >= BILLION);
 
        combined = (u64)div_value * BILLION + billionths;
-       combined <<= div->frac_width;
+       combined <<= div->u.s.frac_width;
 
        return do_div_round_closest(combined, BILLION);
 }
@@ -87,7 +87,7 @@ static inline u64
 scaled_div_min(struct bcm_clk_div *div)
 {
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        return scaled_div_value(div, 0);
 }
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
-       reg_div = ((u32)1 << div->width) - 1;
+       reg_div = ((u32)1 << div->u.s.width) - 1;
 
        return scaled_div_value(div, reg_div);
 }
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));
 
-       return (u32)(scaled_div - ((u64)1 << div->frac_width));
+       return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
 }
 
 /* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
        if (divider_is_fixed(div))
                return (u64)rate;
 
-       return (u64)rate << div->frac_width;
+       return (u64)rate << div->u.s.frac_width;
 }
 
 /* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        flags = ccu_lock(ccu);
-       reg_val = __ccu_read(ccu, div->offset);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
        ccu_unlock(ccu, flags);
 
        /* Extract the full divider field from the register value */
-       reg_div = bitfield_extract(reg_val, div->shift, div->width);
+       reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
 
        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
-       if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
-               reg_val = __ccu_read(ccu, div->offset);
-               reg_div = bitfield_extract(reg_val, div->shift, div->width);
-               div->scaled_div = scaled_div_value(div, reg_div);
+       if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+               reg_val = __ccu_read(ccu, div->u.s.offset);
+               reg_div = bitfield_extract(reg_val, div->u.s.shift,
+                                               div->u.s.width);
+               div->u.s.scaled_div = scaled_div_value(div, reg_div);
 
                return 0;
        }
 
        /* Convert the scaled divisor to the value we need to record */
-       reg_div = divider(div, div->scaled_div);
+       reg_div = divider(div, div->u.s.scaled_div);
 
        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        }
 
        /* Replace the divider value and record the result */
-       reg_val = __ccu_read(ccu, div->offset);
-       reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
-       __ccu_write(ccu, div->offset, reg_val);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
+       reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+                                       reg_div);
+       __ccu_write(ccu, div->u.s.offset, reg_val);
 
        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 
        BUG_ON(divider_is_fixed(div));
 
-       previous = div->scaled_div;
+       previous = div->u.s.scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */
 
-       div->scaled_div = scaled_div;
+       div->u.s.scaled_div = scaled_div;
 
        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        ccu_unlock(ccu, flags);
 
        if (ret)
-               div->scaled_div = previous;             /* Revert the change */
+               div->u.s.scaled_div = previous;         /* Revert the change */
 
        return ret;
 
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 static int kona_peri_clk_enable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
 }
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
 static void kona_peri_clk_disable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
 }
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
 static int kona_peri_clk_is_enabled(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
 }
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
 
        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_div *div = &bcm_clk->peri->div;
+       struct bcm_clk_div *div = &bcm_clk->u.peri->div;
 
        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);
 
        /* Quietly avoid a zero rate */
-       return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+       return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
 }
 
 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        u8 index;
 
        index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
 static bool __peri_clk_init(struct kona_clk *bcm_clk)
 {
        struct ccu_data *ccu = bcm_clk->ccu;
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        const char *name = bcm_clk->name;
        struct bcm_clk_trig *trig;
 
index 5e139adc3dc5909deee925ef38d2915894d52014..dee690951bb6c21677515039d5cac39770418414 100644 (file)
@@ -57,7 +57,7 @@
 #define divider_exists(div)            FLAG_TEST(div, DIV, EXISTS)
 #define divider_is_fixed(div)          FLAG_TEST(div, DIV, FIXED)
 #define divider_has_fraction(div)      (!divider_is_fixed(div) && \
-                                               (div)->frac_width > 0)
+                                               (div)->u.s.frac_width > 0)
 
 #define selector_exists(sel)           ((sel)->width != 0)
 #define trigger_exists(trig)           FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
                        u32 frac_width; /* field fraction width */
 
                        u64 scaled_div; /* scaled divider value */
-               };
+               } s;
                u32 fixed;      /* non-zero fixed divider value */
-       };
+       } u;
        u32 flags;              /* BCM_CLK_DIV_FLAGS_* below */
 };
 
@@ -263,28 +263,28 @@ struct bcm_clk_div {
 /* A fixed (non-zero) divider */
 #define FIXED_DIVIDER(_value)                                          \
        {                                                               \
-               .fixed = (_value),                                      \
+               .u.fixed = (_value),                                    \
                .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED),            \
        }
 
 /* A divider with an integral divisor */
 #define DIVIDER(_offset, _shift, _width)                               \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
 /* A divider whose divisor has an integer and fractional part */
 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width)             \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .frac_width = (_frac_width),                            \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.frac_width = (_frac_width),                        \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
@@ -380,7 +380,7 @@ struct kona_clk {
        union {
                void *data;
                struct peri_clk_data *peri;
-       };
+       } u;
 };
 #define to_kona_clk(_hw) \
        container_of(_hw, struct kona_clk, hw)
index ec22112e569f7f3dc8f7c9477d0e3f99951e66c6..3fbee45402285c451ce88c35233a7eb352f53274 100644 (file)
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
        return true;
 }
 
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+       const struct clk_div_table *clkt;
+       int up = INT_MAX;
+
+       for (clkt = table; clkt->div; clkt++) {
+               if (clkt->div == div)
+                       return clkt->div;
+               else if (clkt->div < div)
+                       continue;
+
+               if ((clkt->div - div) < (up - div))
+                       up = clkt->div;
+       }
+
+       return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+               unsigned long parent_rate, unsigned long rate)
+{
+       int div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               div = __roundup_pow_of_two(div);
+       if (divider->table)
+               div = _round_up_table(divider->table, div);
+
+       return div;
+}
+
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                unsigned long *best_parent_rate)
 {
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = DIV_ROUND_UP(parent_rate, rate);
+               bestdiv = _div_round_up(divider, parent_rate, rate);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (!_is_valid_div(divider, div))
+               return -EINVAL;
+
        value = _get_val(divider, div);
 
        if (value > div_mask(divider))
index dff0373f53c1fb1df1784366a28c35323cde17a5..7cf2c093cc54f28dbebd9482e43233a49daf02e3 100644 (file)
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(__clk_register);
 
-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes.  It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjuction with the
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
+       struct clk *clk;
+
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk) {
+               pr_err("%s: could not allocate clk\n", __func__);
+               ret = -ENOMEM;
+               goto fail_out;
+       }
 
        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
 
        ret = __clk_init(dev, clk);
        if (!ret)
-               return 0;
+               return clk;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
 fail_parent_names:
        kfree(clk->name);
 fail_name:
-       return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes.  It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.  In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
-       int ret;
-       struct clk *clk;
-
-       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-       if (!clk) {
-               pr_err("%s: could not allocate clk\n", __func__);
-               ret = -ENOMEM;
-               goto fail_out;
-       }
-
-       ret = _clk_register(dev, hw, clk);
-       if (!ret)
-               return clk;
-
        kfree(clk);
 fail_out:
        return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
 
        if (!hlist_empty(&clk->children)) {
                struct clk *child;
+               struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry(child, &clk->children, child_node)
+               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
                        clk_set_parent(child, NULL);
        }
 
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
 
 static void devm_clk_release(struct device *dev, void *res)
 {
-       clk_unregister(res);
+       clk_unregister(*(struct clk **)res);
 }
 
 /**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
 {
        struct clk *clk;
-       int ret;
+       struct clk **clkp;
 
-       clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
-       if (!clk)
+       clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+       if (!clkp)
                return ERR_PTR(-ENOMEM);
 
-       ret = _clk_register(dev, hw, clk);
-       if (!ret) {
-               devres_add(dev, clk);
+       clk = clk_register(dev, hw);
+       if (!IS_ERR(clk)) {
+               *clkp = clk;
+               devres_add(dev, clkp);
        } else {
-               devres_free(clk);
-               clk = ERR_PTR(ret);
+               devres_free(clkp);
        }
 
        return clk;
index 2e5810c88d1150874ece970fb64136a450ef64a6..1f6324e29a8099b09930f26234c703d7973be686 100644 (file)
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
 static void __init cpg_mstp_clocks_init(struct device_node *np)
 {
        struct mstp_clock_group *group;
+       const char *idxname;
        struct clk **clks;
        unsigned int i;
 
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
                clks[i] = ERR_PTR(-ENOENT);
 
+       if (of_find_property(np, "clock-indices", &i))
+               idxname = "clock-indices";
+       else
+               idxname = "renesas,clock-indices";
+
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
                const char *parent_name;
                const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
                        continue;
 
                parent_name = of_clk_get_parent_name(np, i);
-               ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
-                                                &clkidx);
+               ret = of_property_read_u32_index(np, idxname, i, &clkidx);
                if (parent_name == NULL || ret < 0)
                        break;
 
index 88dafb5e96270fa13923c5a95792c910a1bd00d4..de6da957a09d6ebe82f416370c84a7dc50acea8e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 
 #include "clk.h"
 
@@ -43,6 +44,8 @@
 
 #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
 
+void __iomem *clk_mgr_base_addr;
+
 static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
 {
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        const char *clk_name = node->name;
        const char *parent_name[SOCFPGA_MAX_PARENTS];
        struct clk_init_data init;
+       struct device_node *clkmgr_np;
        int rc;
        int i = 0;
 
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        if (WARN_ON(!pll_clk))
                return NULL;
 
+       clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+       clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+       BUG_ON(!clk_mgr_base_addr);
        pll_clk->hw.reg = clk_mgr_base_addr + reg;
 
        of_property_read_string(node, "clock-output-names", &clk_name);
index 35a960a993f95c72b6247e52032c4184cbdd00b1..43db947e5f0e51e60c232832d379faca9ca386d3 100644 (file)
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 
 #include "clk.h"
 
-void __iomem *clk_mgr_base_addr;
-
-static const struct of_device_id socfpga_child_clocks[] __initconst = {
-       { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
-       { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
-       { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
-       {},
-};
-
-static void __init socfpga_clkmgr_init(struct device_node *node)
-{
-       clk_mgr_base_addr = of_iomap(node, 0);
-       of_clk_init(socfpga_child_clocks);
-}
-CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
+CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
+CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
 
index bca0a0badbfa68c176aa0e82b663682b53845834..a886702f7c8ba3538ae410be65ba0061ec9d0d96 100644 (file)
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
        gate->lock = odf_lock;
 
        div = kzalloc(sizeof(*div), GFP_KERNEL);
-       if (!div)
+       if (!div) {
+               kfree(gate);
                return ERR_PTR(-ENOMEM);
+       }
 
        div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
        div->reg = reg + pll_data->odf[odf].offset;
index 0d20241e07704df196c72ef22510775d22f5bfde..6aad8abc69a2d732c06a6e59a4f412859fba20aa 100644 (file)
@@ -58,9 +58,9 @@
 #define PLLDU_LFCON_SET_DIVN 600
 
 #define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
 #define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
 #define PLLE_BASE_DIVN_SHIFT 8
 #define PLLE_BASE_DIVN_WIDTH 8
 #define PLLE_BASE_DIVM_SHIFT 0
 #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
                      mask(p->params->div_nmp->divp_width))
 
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
 #define divm_max(p) (divm_mask(p))
 #define divn_max(p) (divn_mask(p))
 #define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
        } else {
                val = pll_readl_base(pll);
 
-               val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
-                (divn_mask(pll) << div_nmp->divn_shift) |
-                (divp_mask(pll) << div_nmp->divp_shift));
+               val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+                        divp_mask_shifted(pll));
 
-               val |= ((cfg->m << div_nmp->divm_shift) |
-                       (cfg->n << div_nmp->divn_shift) |
-                       (cfg->p << div_nmp->divp_shift));
+               val |= (cfg->m << divm_shift(pll)) |
+                      (cfg->n << divn_shift(pll)) |
+                      (cfg->p << divp_shift(pll));
 
                pll_writel_base(val, pll);
        }
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
        if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
                /* configure dividers */
                val = pll_readl_base(pll);
-               val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-               val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-               val |= sel.m << pll->params->div_nmp->divm_shift;
-               val |= sel.n << pll->params->div_nmp->divn_shift;
-               val |= sel.p << pll->params->div_nmp->divp_shift;
+               val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+                        divm_mask_shifted(pll));
+               val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+               val |= sel.m << divm_shift(pll);
+               val |= sel.n << divn_shift(pll);
+               val |= sel.p << divp_shift(pll);
                val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
                pll_writel_base(val, pll);
        }
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
        pll_writel_misc(val, pll);
 
        val = readl(pll->clk_base + PLLE_SS_CTRL);
+       val &= ~PLLE_SS_COEFFICIENTS_MASK;
        val |= PLLE_SS_DISABLE;
        writel(val, pll->clk_base + PLLE_SS_CTRL);
 
-       val |= pll_readl_base(pll);
+       val = pll_readl_base(pll);
        val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
        pll_writel_base(val, pll);
 
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        pll_writel(val, PLLE_SS_CTRL, pll);
 
        val = pll_readl_base(pll);
-       val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-       val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-       val |= sel.m << pll->params->div_nmp->divm_shift;
-       val |= sel.n << pll->params->div_nmp->divn_shift;
+       val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+                divm_mask_shifted(pll));
+       val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+       val |= sel.m << divm_shift(pll);
+       val |= sel.n << divn_shift(pll);
        val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
        pll_writel_base(val, pll);
        udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
        return clk;
 }
 
+static struct div_nmp pll_e_nmp = {
+       .divn_shift = PLLE_BASE_DIVN_SHIFT,
+       .divn_width = PLLE_BASE_DIVN_WIDTH,
+       .divm_shift = PLLE_BASE_DIVM_SHIFT,
+       .divm_width = PLLE_BASE_DIVM_WIDTH,
+       .divp_shift = PLLE_BASE_DIVP_SHIFT,
+       .divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
 struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
                void __iomem *clk_base, void __iomem *pmc,
                unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 
        pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
        pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+       if (!pll_params->div_nmp)
+               pll_params->div_nmp = &pll_e_nmp;
+
        pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
        if (IS_ERR(pll))
                return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
                int m;
 
                m = _pll_fixed_mdiv(pll_params, parent_rate);
-               val = m << PLL_BASE_DIVM_SHIFT;
-               val |= (pll_params->vco_min / parent_rate)
-                               << PLL_BASE_DIVN_SHIFT;
+               val = m << divm_shift(pll);
+               val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
                pll_writel_base(val, pll);
        }
 
@@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
                                        "pll_re_vco");
        } else {
                val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
-               pll_writel(val, pll_params->aux_reg, pll);
+               pll_writel(val_aux, pll_params->aux_reg, pll);
        }
 
        clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
index 00fdd11702849e042a2550dec0ac089075ada987..a8d7ea14f1835b3d8c6d45e20937051fd79f7cbb 100644 (file)
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                        || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
                __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
                __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-               clk_disable_unprepare(tcd->clk);
+               clk_disable(tcd->clk);
        }
 
        switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
         * of oneshot, we get lower overhead and improved accuracy.
         */
        case CLOCK_EVT_MODE_PERIODIC:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and restart */
                __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                break;
 
        case CLOCK_EVT_MODE_ONESHOT:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and stop */
                __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
        ret = clk_prepare_enable(t2_clk);
        if (ret)
                return ret;
-       clk_disable_unprepare(t2_clk);
+       clk_disable(t2_clk);
 
        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;
index b52e1c078b9955330dda32f803a26e8fa1527ab7..7f5374dbefd933d41ae5b64333d1cc8d9b52a6ce 100644 (file)
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 
        action->dev_id = ce;
        BUG_ON(setup_irq(ce->irq, action));
-       irq_set_affinity(action->irq, cpumask_of(cpu));
+       irq_force_affinity(action->irq, cpumask_of(cpu));
 
        clockevents_register_device(ce);
        return 0;
index 1bf6bbac3e03ae1bd9ffc4f001a71f7a0d0befcb..09b9129c7bd3b806cbacde8471ed574dd260ea25 100644 (file)
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+       cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
        if (IS_ERR(cpu_reg)) {
                /*
                 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                        PTR_ERR(cpu_reg));
        }
 
-       cpu_clk = devm_clk_get(cpu_dev, NULL);
+       cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
                pr_err("failed to get cpu0 clock: %d\n", ret);
-               goto out_put_node;
+               goto out_put_reg;
        }
 
        ret = of_init_opp_table(cpu_dev);
        if (ret) {
                pr_err("failed to init OPP table: %d\n", ret);
-               goto out_put_node;
+               goto out_put_clk;
        }
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table: %d\n", ret);
-               goto out_put_node;
+               goto out_put_clk;
        }
 
        of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
 out_free_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+       if (!IS_ERR(cpu_clk))
+               clk_put(cpu_clk);
+out_put_reg:
+       if (!IS_ERR(cpu_reg))
+               regulator_put(cpu_reg);
 out_put_node:
        of_node_put(np);
        return ret;
index ba43991ba98aeeee2c7f833be6cb08f58638bb19..e1c6433b16e06653d159782b0a053830e570e3af 100644 (file)
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                break;
 
        case CPUFREQ_GOV_LIMITS:
+               mutex_lock(&dbs_data->mutex);
+               if (!cpu_cdbs->cur_policy) {
+                       mutex_unlock(&dbs_data->mutex);
+                       break;
+               }
                mutex_lock(&cpu_cdbs->timer_mutex);
                if (policy->max < cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                        policy->min, CPUFREQ_RELATION_L);
                dbs_check_cpu(dbs_data, cpu);
                mutex_unlock(&cpu_cdbs->timer_mutex);
+               mutex_unlock(&dbs_data->mutex);
                break;
        }
        return 0;
index 099967302bf25939019875846f7819461a4805c2..eab8ccfe6bebed652b8fec4849a5fc9fb67d82f3 100644 (file)
@@ -37,6 +37,7 @@
 #define BYT_RATIOS             0x66a
 #define BYT_VIDS               0x66b
 #define BYT_TURBO_RATIOS       0x66c
+#define BYT_TURBO_VIDS         0x66d
 
 
 #define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
 };
 
 struct vid_data {
-       int32_t min;
-       int32_t max;
+       int min;
+       int max;
+       int turbo;
        int32_t ratio;
 };
 
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 8) & 0xFF;
+       return (value >> 8) & 0x3F;
 }
 
 static int byt_get_max_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 16) & 0xFF;
+       return (value >> 16) & 0x3F;
 }
 
 static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = fp_toint(vid_fp);
 
+       if (pstate > cpudata->pstate.max_pstate)
+               vid = cpudata->vid.turbo;
+
        val |= vid;
 
        wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
+
        rdmsrl(BYT_VIDS, value);
-       cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
-       cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+       cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
+       cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
+
+       rdmsrl(BYT_TURBO_VIDS, value);
+       cpudata->vid.turbo = value & 0x7f;
 }
 
 
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);
-
-       /*
-        * goto max pstate so we don't slow up boot if we are built-in if we are
-        * a module we will take care of it during normal operation
-        */
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        intel_pstate_get_cpu_pstates(cpu);
-       if (!cpu->pstate.current_pstate) {
-               all_cpu_data[cpunum] = NULL;
-               kfree(cpu);
-               return -ENODATA;
-       }
 
        cpu->cpu = cpunum;
 
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
        add_timer_on(&cpu->timer, cpunum);
 
index f0bc31f5db27a41db3d7f8556e72274b42a5eda5..d4add86219444af31891ef8c76013eaed0838282 100644 (file)
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
        /* setting the cpu frequency */
-       clk_set_rate(policy->clk, freq);
+       clk_set_rate(policy->clk, freq * 1000);
 
        return 0;
 }
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
             i++)
                loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
-       ret = clk_set_rate(cpuclk, rate);
+       ret = clk_set_rate(cpuclk, rate * 1000);
        if (ret) {
                clk_put(cpuclk);
                return ret;
index 9f25f5296029aeb0a22e0caf81bf35ca41b00063..0eabd81e1a902711bb838eb93dfed8e3289be610 100644 (file)
        char *tmp;                                              \
                                                                \
        tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);  \
-       sprintf(tmp, format, param);                            \
-       strcat(str, tmp);                                       \
-       kfree(tmp);                                             \
+       if (likely(tmp)) {                                      \
+               sprintf(tmp, format, param);                    \
+               strcat(str, tmp);                               \
+               kfree(tmp);                                     \
+       } else {                                                \
+               strcat(str, "kmalloc failure in SPRINTFCAT");   \
+       }                                                       \
 }
 
 static void report_jump_idx(u32 status, char *outstr)
index a886713937fd05b38fc10866bbce5266ac0bfae1..d5d30ed863ceb904c6e19e245b94d7ddba40f88f 100644 (file)
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
+       cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
+       unmap->map_cnt = nr;
 
        return unmap;
 }
index cfdbb92aae1dece5512f023afe341db2b086d171..7a740769c2fa592a0cbf7a73cdf2166e655e63d2 100644 (file)
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        /* Disable BLOCK interrupts as well */
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-       err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
-                              IRQF_SHARED, "dw_dmac", dw);
-       if (err)
-               return err;
-
        /* Create a pool of consistent memory blocks for hardware descriptors */
        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
+       err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+                         "dw_dmac", dw);
+       if (err)
+               return err;
+
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);
 
+       free_irq(chip->irq, dw);
        tasklet_kill(&dw->tasklet);
 
        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
index 766b68ed505c4d2b3964bfb1f0de6ab5ae1ff3a9..394cbc5c93e3600a8c918bfb673756bdd279ce7b 100644 (file)
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-       u32 activation;
-
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-       activation = readl_relaxed(XOR_ACTIVATION(chan));
-       activation |= 0x1;
-       writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+       /* writel ensures all descriptors are flushed before activation */
+       writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
index ab26d46bbe1598434625979abeb488d5199992d9..5ebdfbc1051ea7ed6e4dd948f9cac1e9b04760fc 100644 (file)
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
        struct sa11x0_dma_desc  *txd_load;
        unsigned                sg_done;
        struct sa11x0_dma_desc  *txd_done;
-#ifdef CONFIG_PM_SLEEP
        u32                     dbs[2];
        u32                     dbt[2];
        u32                     dcsr;
-#endif
 };
 
 struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int sa11x0_dma_suspend(struct device *dev)
 {
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
        .suspend_noirq = sa11x0_dma_suspend,
index c98764aeeec6112928fcde7ac0907949aab001ea..f477308b6e9ce23645784b3edc1bbd92b29b251d 100644 (file)
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
 
 #define LOCAL_BUS 0xffc0
 
-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE          (128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE          (1ULL << 32)
 
 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
index 8db66321956068701cde997e0bc25e167e781eb3..586f2f7f6993d350ca43fb0947bb0d457038d23f 100644 (file)
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
                    version >> 16, version & 0xff, ohci->card.index,
                    ohci->n_ir, ohci->n_it, ohci->quirks,
                    reg_read(ohci, OHCI1394_PhyUpperBound) ?
-                       ", >4 GB phys DMA" : "");
+                       ", physUB" : "");
 
        return 0;
 
index 3ee852c9925b6a1aa1887df101662194eeb3c42c..071c2c969eec06ad929ecfb871c614297a615e9e 100644 (file)
@@ -756,6 +756,7 @@ static const struct {
         */
        { ACPI_SIG_IBFT },
        { "iBFT" },
+       { "BIFT" },     /* Broadcom iSCSI Offload */
 };
 
 static void __init acpi_find_ibft_region(void)
index e73c6755a5eb6b324d06f14f4a7cf574d025c8ca..70304220a479a9862f4f5709e6a9a88a785b487d 100644 (file)
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
 
        .ngpio = 50,
        .have_blink = true,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
        .uses_gpe0 = true,
 
        .ngpio = 50,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* ICH7 and ICH8-based */
index 99a68310e7c09eb053f5cd28395549ece9f8babe..3d53fd6880d1970b074f6b0eda2575ea55e27476 100644 (file)
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "invalid spi-present-mask\n");
                        return -ENODEV;
                }
-
-               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+                       if ((spi_present_mask & (1 << addr)))
+                               chips++;
                        pullups[addr] = 0;
+               }
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
                        pullups[addr] = pdata->chip[addr].pullups;
                }
 
-               if (!chips)
-                       return -ENODEV;
-
                base = pdata->base;
        }
 
+       if (!chips)
+               return -ENODEV;
+
        data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
                        GFP_KERNEL);
        if (!data)
index ec82f6bff1225dc06e5b591838c59778f65cecb5..108e1ec2fa4b491b7d34d3a977e0c3b427a975ab 100644 (file)
@@ -1954,6 +1954,9 @@ struct drm_i915_cmd_table {
 #define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev)                ((dev)->pdev->device == 0x0A0E || \
+                                (dev)->pdev->device == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
index 62a5c3627b90eaea4e6620cb829af572781e3d94..154b0f8bb88de02addd24d21e51494728897a622 100644 (file)
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-       if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+       if (i915.enable_ppgtt == 0)
                return false;
 
        if (i915.enable_ppgtt == 1 && full)
                return false;
 
+       return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+       if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+               return 0;
+
+       if (enable_ppgtt == 1)
+               return 1;
+
+       if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+               return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
-               return false;
+               return 0;
        }
 #endif
 
-       /* Full ppgtt disabled by default for now due to issues. */
-       if (full)
-               return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
-       else
-               return HAS_ALIASING_PPGTT(dev);
+       return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
 #define GEN6_PPGTT_PD_ENTRIES 512
@@ -2031,6 +2041,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
                 gtt->base.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
        return 0;
 }
index fa486c5fbb0250650b558632c6f63aa0ebf5bd63..aff4a113cda3c0cd724d3b4d13ce0fb8bbc48313 100644 (file)
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        dev_priv->vbt.edp_pps = *edp_pps;
 
-       dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-               DP_LINK_BW_1_62;
+       switch (edp_link_params->rate) {
+       case EDP_RATE_1_62:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+               break;
+       case EDP_RATE_2_7:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+                             edp_link_params->rate);
+               break;
+       }
+
        switch (edp_link_params->lanes) {
-       case 0:
+       case EDP_LANE_1:
                dev_priv->vbt.edp_lanes = 1;
                break;
-       case 1:
+       case EDP_LANE_2:
                dev_priv->vbt.edp_lanes = 2;
                break;
-       case 3:
-       default:
+       case EDP_LANE_4:
                dev_priv->vbt.edp_lanes = 4;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+                             edp_link_params->lanes);
+               break;
        }
+
        switch (edp_link_params->preemphasis) {
-       case 0:
+       case EDP_PREEMPHASIS_NONE:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
                break;
-       case 1:
+       case EDP_PREEMPHASIS_3_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
                break;
-       case 2:
+       case EDP_PREEMPHASIS_6dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
                break;
-       case 3:
+       case EDP_PREEMPHASIS_9_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+                             edp_link_params->preemphasis);
+               break;
        }
+
        switch (edp_link_params->vswing) {
-       case 0:
+       case EDP_VSWING_0_4V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
                break;
-       case 1:
+       case EDP_VSWING_0_6V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
                break;
-       case 2:
+       case EDP_VSWING_0_8V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
                break;
-       case 3:
+       case EDP_VSWING_1_2V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+                             edp_link_params->vswing);
+               break;
        }
 }
 
index 69bcc42a0e44327679217d29a9415e553bd3564b..48aa516a1ac0c354cd5e7ad3ab95897e4210a337 100644 (file)
@@ -11395,15 +11395,6 @@ void intel_modeset_init(struct drm_device *dev)
        }
 }
 
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
-       connector->base.dpms = DRM_MODE_DPMS_OFF;
-       connector->base.encoder = NULL;
-       connector->encoder->connectors_active = false;
-       connector->encoder->base.crtc = NULL;
-}
-
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
        struct intel_connector *connector;
@@ -11485,8 +11476,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                        if (connector->encoder->base.crtc != &crtc->base)
                                continue;
 
-                       intel_connector_break_all_links(connector);
+                       connector->base.dpms = DRM_MODE_DPMS_OFF;
+                       connector->base.encoder = NULL;
                }
+               /* multiple connectors may have the same encoder:
+                *  handle them and break crtc link separately */
+               list_for_each_entry(connector, &dev->mode_config.connector_list,
+                                   base.head)
+                       if (connector->encoder->base.crtc == &crtc->base) {
+                               connector->encoder->base.crtc = NULL;
+                               connector->encoder->connectors_active = false;
+                       }
 
                WARN_ON(crtc->active);
                crtc->base.enabled = false;
@@ -11568,6 +11568,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                                      drm_get_encoder_name(&encoder->base));
                        encoder->disable(encoder);
                }
+               encoder->base.crtc = NULL;
+               encoder->connectors_active = false;
 
                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
@@ -11578,8 +11580,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                                    base.head) {
                        if (connector->encoder != encoder)
                                continue;
-
-                       intel_connector_break_all_links(connector);
+                       connector->base.dpms = DRM_MODE_DPMS_OFF;
+                       connector->base.encoder = NULL;
                }
        }
        /* Enabled encoders without active connectors will be fixed in
index dfa85289f28f301fe259b522b45ef1521489175e..2a00cb828d20c7549a7b00a6745c228998cc136b 100644 (file)
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-               if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+               if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+                    INTEL_INFO(dev)->gen >= 8) &&
                    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
                        max_link_bw = DP_LINK_BW_5_4;
                else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
        return max_link_bw;
 }
 
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       u8 source_max, sink_max;
+
+       source_max = 4;
+       if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+           (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+               source_max = 2;
+
+       sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+       return min(source_max, sink_max);
+}
+
 /*
  * The units on the numbers in the next two are... bizarre.  Examples will
  * make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        }
 
        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-       max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+       max_lanes = intel_dp_max_lane_count(intel_dp);
 
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);
@@ -750,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
-       int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+       int min_lane_count = 1;
+       int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
+       int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -784,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-           dev_priv->vbt.edp_bpp < bpp) {
-               DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-                             dev_priv->vbt.edp_bpp);
-               bpp = dev_priv->vbt.edp_bpp;
+       if (is_edp(intel_dp)) {
+               if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+                       DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+                                     dev_priv->vbt.edp_bpp);
+                       bpp = dev_priv->vbt.edp_bpp;
+               }
+
+               if (IS_BROADWELL(dev)) {
+                       /* Yes, it's an ugly hack. */
+                       min_lane_count = max_lane_count;
+                       DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+                                     min_lane_count);
+               } else if (dev_priv->vbt.edp_lanes) {
+                       min_lane_count = min(dev_priv->vbt.edp_lanes,
+                                            max_lane_count);
+                       DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+                                     min_lane_count);
+               }
+
+               if (dev_priv->vbt.edp_rate) {
+                       min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+                       DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+                                     bws[min_clock]);
+               }
        }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = 0; clock <= max_clock; clock++) {
+               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+                       for (clock = min_clock; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
index fce4a0d93c0b19b7d51f4d2578331b13aef51399..f73ba5e6b7a8d685b4530e16c6735c095c2aeb5f 100644 (file)
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                                          height);
                }
 
+               /* No preferred mode marked by the EDID? Are there any modes? */
+               if (!modes[i] && !list_empty(&connector->modes)) {
+                       DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+                                     drm_get_connector_name(connector));
+                       modes[i] = list_first_entry(&connector->modes,
+                                                   struct drm_display_mode,
+                                                   head);
+               }
+
                /* last resort: use current mode */
                if (!modes[i]) {
                        /*
index 0eead16aeda7404053de8c9885992b0904c1049c..cb8cfb7e09749938383c18a7eb6e2bc149823095 100644 (file)
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 freq;
        unsigned long flags;
+       u64 n;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 
        /* scale to hardware max, but be careful to not overflow */
        freq = panel->backlight.max;
-       if (freq < max)
-               level = level * freq / max;
-       else
-               level = freq / max * level;
+       n = (u64)level * freq;
+       do_div(n, max);
+       level = n;
 
        panel->backlight.level = level;
        if (panel->backlight.device)
index 19e94c3edc1957d96b928fb23d696b393216fc42..d93dcf683e8c3695960ca93c92dce87a7823d3f2 100644 (file)
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
        }
 }
 
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+                                   uint16_t wm[5], uint16_t min)
+{
+       int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+       if (wm[0] >= min)
+               return false;
+
+       wm[0] = max(wm[0], min);
+       for (level = 1; level <= max_level; level++)
+               wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+       return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool changed;
+
+       /*
+        * The BIOS provided WM memory latency values are often
+        * inadequate for high resolution displays. Adjust them.
+        */
+       changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+       if (!changed)
+               return;
+
+       DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+       intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
        intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
        intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+       if (IS_GEN6(dev))
+               snb_wm_latency_quirk(dev);
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
index d27155adf5db2b039dee51b9aa0de25e60c8967e..46be00d66df3da3e74597ba9c02f0ac745741751 100644 (file)
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        if (ret < 0)
                goto err1;
 
-       ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-                               &drm_connector->kdev->kobj,
+       ret = sysfs_create_link(&drm_connector->kdev->kobj,
+                               &encoder->ddc.dev.kobj,
                                encoder->ddc.dev.kobj.name);
        if (ret < 0)
                goto err2;
index f729dc71d5beb031599aca72f82c90ec946e2fe0..d0c75779d3f6f91e9cc98b0f1a853344783fdb48 100644 (file)
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
index 7762665ad8fdb5b461680ee84b7b224b77a5d4d5..876de9ac3793fd30af193ac7e8410818bdb56e7a 100644 (file)
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
        }
 
        if (outp == 8)
-               return false;
+               return conf;
 
        data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
        if (data == 0x0000)
index 1dc37b1ddbfac0a2ecddcfdcd59a80a267e65520..b0d0fb2f4d083813a799d1c02bb3d953646f2889 100644 (file)
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
        mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
        mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
-       mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+       mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
 
        mmio_list(0x40800c, 0x00000000,  8, 1);
        mmio_list(0x408010, 0x80000000,  0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
        mmio_list(0x418e24, 0x00000000,  8, 0);
        mmio_list(0x418e28, 0x80000030,  0, 0);
 
+       mmio_list(0x4064c8, 0x018002c0,  0, 0);
+
        mmio_list(0x418810, 0x80000000, 12, 2);
        mmio_list(0x419848, 0x10000000, 12, 2);
        mmio_list(0x419c2c, 0x10000000, 12, 2);
index fb0b6b2d1427f436f14666c3611d4d6807bd2d36..222e8ebb669dff496534331682e7a77c7e6bd9e0 100644 (file)
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
         */
        i = 16;
        do {
-               if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
+               u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
+               if (data == 0xaa55)
                        break;
        } while (i--);
 
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
                goto out;
 
        /* read entire bios image to system memory */
-       bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
+       bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
+       bios->size = bios->size * 512;
        if (!bios->size)
                goto out;
 
        bios->data = kmalloc(bios->size, GFP_KERNEL);
        if (bios->data) {
-               for (i = 0; i < bios->size; i+=4)
-                       nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+               for (i = 0; i < bios->size; i += 4)
+                       ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
        }
 
        /* check the PCI record header */
index 43fec17ea540b6c53af704931d4baf29d2391e4e..bbf117be572f4617cff0fd06907fb2a13977ae93 100644 (file)
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
+               case 0x1e: return 2;
                default:
                        break;
                }
index 83face3f608f020f70d7d11dda363c5817102238..279206997e5cd7d091d44e26f84ce5efdfb06f1d 100644 (file)
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
        acpi_status status;
        acpi_handle dhandle, rom_handle;
 
-       if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
-               return false;
-
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
index 3ff030dc1ee35d34925b2fd5464f1236ad10d925..da764a4ed9588273fe6cf02ed0d6c589aa2ebda0 100644 (file)
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        }
 
        ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-       mutex_unlock(&chan->cli->mutex);
        if (ret)
                goto fail_unreserve;
+       mutex_unlock(&chan->cli->mutex);
 
        /* Update the crtc struct and cleanup */
        crtc->primary->fb = fb;
index fb187c78978f8d5a139359381870aaea8ad1cc91..c31c12b4e66681614f0d294e4fd0a0cf19350c88 100644 (file)
@@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 
                /* Set NUM_BANKS. */
                if (rdev->family >= CHIP_TAHITI) {
-                       unsigned tileb, index, num_banks, tile_split_bytes;
+                       unsigned index, num_banks;
 
-                       /* Calculate the macrotile mode index. */
-                       tile_split_bytes = 64 << tile_split;
-                       tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
-                       tileb = min(tile_split_bytes, tileb);
+                       if (rdev->family >= CHIP_BONAIRE) {
+                               unsigned tileb, tile_split_bytes;
 
-                       for (index = 0; tileb > 64; index++) {
-                               tileb >>= 1;
-                       }
+                               /* Calculate the macrotile mode index. */
+                               tile_split_bytes = 64 << tile_split;
+                               tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+                               tileb = min(tile_split_bytes, tileb);
 
-                       if (index >= 16) {
-                               DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
-                                         target_fb->bits_per_pixel, tile_split);
-                               return -EINVAL;
-                       }
+                               for (index = 0; tileb > 64; index++)
+                                       tileb >>= 1;
+
+                               if (index >= 16) {
+                                       DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                                 target_fb->bits_per_pixel, tile_split);
+                                       return -EINVAL;
+                               }
 
-                       if (rdev->family >= CHIP_BONAIRE)
                                num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
-                       else
+                       } else {
+                               switch (target_fb->bits_per_pixel) {
+                               case 8:
+                                       index = 10;
+                                       break;
+                               case 16:
+                                       index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+                                       break;
+                               default:
+                               case 32:
+                                       index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+                                       break;
+                               }
+
                                num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+                       }
+
                        fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
                } else {
                        /* NI and older. */
@@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                }
                /* otherwise, pick one of the plls */
                if ((rdev->family == CHIP_KAVERI) ||
-                   (rdev->family == CHIP_KABINI)) {
-                       /* KB/KV has PPLL1 and PPLL2 */
+                   (rdev->family == CHIP_KABINI) ||
+                   (rdev->family == CHIP_MULLINS)) {
+                       /* KB/KV/ML has PPLL1 and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
            (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
                is_tvcv = true;
 
+       if (!radeon_crtc->adjusted_clock)
+               return -EINVAL;
+
        atombios_crtc_set_pll(crtc, adjusted_mode);
 
        if (ASIC_IS_DCE4(rdev))
index bc0119fb6c12a9373e1bd282886d38f8ed858135..54e4f52549af47f19edf39340754a4e33a4da49c 100644 (file)
@@ -366,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
        if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
-       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3))
+       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 
-       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3))
+       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 }
@@ -419,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
 
        if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
                /* DP bridge chips */
-               drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-                                 DP_EDP_CONFIGURATION_CAP, &tmp);
-               if (tmp & 1)
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
-               else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
-                        (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
-               else
-                       panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+               if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+                                     DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+                       if (tmp & 1)
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+                       else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+                                (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+                       else
+                               panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+               }
        } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                /* eDP */
-               drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-                                 DP_EDP_CONFIGURATION_CAP, &tmp);
-               if (tmp & 1)
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+               if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+                                     DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+                       if (tmp & 1)
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+               }
        }
 
        return panel_mode;
@@ -809,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
        else
                dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-       drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp);
-       if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
-               dp_info.tp3_supported = true;
-       else
+       if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+           == 1) {
+               if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+                       dp_info.tp3_supported = true;
+               else
+                       dp_info.tp3_supported = false;
+       } else {
                dp_info.tp3_supported = false;
+       }
 
        memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
        dp_info.rdev = rdev;
index 199eb194716f83b64aa7c3850d30606f4ba3f053..d2fd989680857d5a08a7d65d586bb0b4a40be6e2 100644 (file)
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
 MODULE_FIRMWARE("radeon/KABINI_mec.bin");
 MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
 MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
+MODULE_FIRMWARE("radeon/MULLINS_me.bin");
+MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
+MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
+MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
+MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] =
        0xd80c, 0xff000ff0, 0x00000100
 };
 
+static const u32 godavari_golden_registers[] =
+{
+       0x55e4, 0xff607fff, 0xfc000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x98302, 0xf00fffff, 0x00000400,
+       0x6130, 0xffffffff, 0x00010000,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0xf0311fff, 0x80300000,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2408, 0x000fffff, 0x000c007f,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x30a04, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0xd014, 0x00010000, 0x00810001,
+       0xd814, 0x00010000, 0x00810001,
+       0x3e78, 0x00000001, 0x00000002,
+       0xc768, 0x00000008, 0x00000008,
+       0xc770, 0x00000f00, 0x00000800,
+       0xc774, 0x00000f00, 0x00000800,
+       0xc798, 0x00ffffff, 0x00ff7fbf,
+       0xc79c, 0x00ffffff, 0x00ff7faf,
+       0x8c00, 0x000000ff, 0x00000001,
+       0x214f8, 0x01ff01ff, 0x00000002,
+       0x21498, 0x007ff800, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x30934, 0xffffffff, 0x00000000
+};
+
+
 static void cik_init_golden_registers(struct radeon_device *rdev)
 {
        switch (rdev->family) {
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
                                                 kalindi_golden_spm_registers,
                                                 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
                break;
+       case CHIP_MULLINS:
+               radeon_program_register_sequence(rdev,
+                                                kalindi_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                godavari_golden_registers,
+                                                (const u32)ARRAY_SIZE(godavari_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_common_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_spm_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+               break;
        case CHIP_KAVERI:
                radeon_program_register_sequence(rdev,
                                                 spectre_mgcg_cgcg_init,
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev)
                rlc_req_size = KB_RLC_UCODE_SIZE * 4;
                sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
                break;
+       case CHIP_MULLINS:
+               chip_name = "MULLINS";
+               pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+               me_req_size = CIK_ME_UCODE_SIZE * 4;
+               ce_req_size = CIK_CE_UCODE_SIZE * 4;
+               mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+               rlc_req_size = ML_RLC_UCODE_SIZE * 4;
+               sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+               break;
        default: BUG();
        }
 
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
                gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_KABINI:
+       case CHIP_MULLINS:
        default:
                rdev->config.cik.max_shader_engines = 1;
                rdev->config.cik.max_tile_pipes = 2;
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
@@ -5800,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev)
        case CHIP_KABINI:
                size = KB_RLC_UCODE_SIZE;
                break;
+       case CHIP_MULLINS:
+               size = ML_RLC_UCODE_SIZE;
+               break;
        }
 
        cik_rlc_stop(rdev);
@@ -6548,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
@@ -6693,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
+       /* pflip */
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 4) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+       }
 
        /* dac hotplug */
        WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -7049,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev)
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+       if (rdev->num_crtc >= 4) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+
        WREG32(DC_HPD1_INT_CONTROL, hpd1);
        WREG32(DC_HPD2_INT_CONTROL, hpd2);
        WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -7085,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
        rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
        rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
 
+       rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+               EVERGREEN_CRTC0_REGISTER_OFFSET);
+       rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+               EVERGREEN_CRTC1_REGISTER_OFFSET);
+       if (rdev->num_crtc >= 4) {
+               rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC2_REGISTER_OFFSET);
+               rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC3_REGISTER_OFFSET);
+       }
+       if (rdev->num_crtc >= 6) {
+               rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC4_REGISTER_OFFSET);
+               rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC5_REGISTER_OFFSET);
+       }
+
+       if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_CLEAR);
+       if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_CLEAR);
        if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
                WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
        if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -7095,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
        if (rdev->num_crtc >= 4) {
+               if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
                if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
                        WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
                if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -7106,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
        }
 
        if (rdev->num_crtc >= 6) {
+               if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
                if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
                        WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
                if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -7457,6 +7596,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
index f7e46cf682afdcbe051cb4a6ecf6dfe66570a5c3..72e464c79a88a777c27d2ada64667bf9e8798aa2 100644 (file)
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index 213873270d5f6b705974aa57867f0dff47b7901a..dd7926394a8fdaf6821fdd10151f98639c7c082f 100644 (file)
 #       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
 #define DISP_INTERRUPT_STATUS_CONTINUE6                 0x6780
 
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL                                0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
 #define        DAC_AUTODETECT_INT_CONTROL                      0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
index b406546440da7cda8d7da10f7acbf6e6c22186e0..0f7a51a3694f0fff5bde09c7a4bf57dd7bd51963 100644 (file)
@@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
-       u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
        u32 dma_cntl, dma_cntl1 = 0;
        u32 thermal_int = 0;
@@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+              GRPH_PFLIP_INT_MASK);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+              GRPH_PFLIP_INT_MASK);
        if (rdev->num_crtc >= 4) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 6) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
 
        WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4951,6 +4956,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
index 287fe966d7de135161704b0e80ec38676ae1424b..478caefe0fef918011fadd78e1419ac92183ae54 100644 (file)
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index 16ec9d56a234b107742a13acd5a9684f62aabf42..3f6e817d97ee80cb0013c85818a1ec0c4c110e79 100644 (file)
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev,
        return 0;
 }
 
+static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
+                                  struct sumo_vid_mapping_table *vid_mapping_table,
+                                  u32 vid_2bit)
+{
+       struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+               &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       u32 i;
+
+       if (vddc_sclk_table && vddc_sclk_table->count) {
+               if (vid_2bit < vddc_sclk_table->count)
+                       return vddc_sclk_table->entries[vid_2bit].v;
+               else
+                       return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+       } else {
+               for (i = 0; i < vid_mapping_table->num_entries; i++) {
+                       if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+                               return vid_mapping_table->entries[i].vid_7bit;
+               }
+               return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+       }
+}
+
+static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
+                                  struct sumo_vid_mapping_table *vid_mapping_table,
+                                  u32 vid_7bit)
+{
+       struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+               &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       u32 i;
+
+       if (vddc_sclk_table && vddc_sclk_table->count) {
+               for (i = 0; i < vddc_sclk_table->count; i++) {
+                       if (vddc_sclk_table->entries[i].v == vid_7bit)
+                               return i;
+               }
+               return vddc_sclk_table->count - 1;
+       } else {
+               for (i = 0; i < vid_mapping_table->num_entries; i++) {
+                       if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+                               return vid_mapping_table->entries[i].vid_2bit;
+               }
+
+               return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+       }
+}
+
 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
                                            u16 voltage)
 {
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
                                            u32 vid_2bit)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
-       u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
-                                                &pi->sys_info.vid_mapping_table,
-                                                vid_2bit);
+       u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
+                                              &pi->sys_info.vid_mapping_table,
+                                              vid_2bit);
 
        return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
 }
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
 
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
        else
                return kv_set_enabled_levels(rdev);
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
        struct radeon_uvd_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
        int ret;
+       u32 mask;
 
        if (!gate) {
-               if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+               if (table->count)
                        pi->uvd_boot_level = table->count - 1;
                else
                        pi->uvd_boot_level = 0;
 
+               if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+                       mask = 1 << pi->uvd_boot_level;
+               } else {
+                       mask = 0x1f;
+               }
+
                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
                if (ret)
                        return ret;
 
-               if (!pi->caps_uvd_dpm ||
-                   pi->caps_stable_p_state)
-                       kv_send_msg_to_smc_with_parameter(rdev,
-                                                         PPSMC_MSG_UVDDPM_SetEnabledMask,
-                                                         (1 << pi->uvd_boot_level));
+               kv_send_msg_to_smc_with_parameter(rdev,
+                                                 PPSMC_MSG_UVDDPM_SetEnabledMask,
+                                                 mask);
        }
 
        return kv_enable_uvd_dpm(rdev, !gate);
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
        if (pi->acp_power_gated == gate)
                return;
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return;
 
        pi->acp_power_gated = gate;
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                }
        }
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                if (pi->enable_dpm) {
                        kv_set_valid_clock_range(rdev, new_ps);
                        kv_update_dfs_bypass_settings(rdev, new_ps);
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                                return ret;
                        }
                        kv_update_sclk_t(rdev);
+                       if (rdev->family == CHIP_MULLINS)
+                               kv_enable_nb_dpm(rdev);
                }
        } else {
                if (pi->enable_dpm) {
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                kv_force_lowest_valid(rdev);
                kv_init_graphics_levels(rdev);
                kv_program_bootup_state(rdev);
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
 static void kv_patch_voltage_values(struct radeon_device *rdev)
 {
        int i;
-       struct radeon_uvd_clock_voltage_dependency_table *table =
+       struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct radeon_vce_clock_voltage_dependency_table *vce_table =
+               &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       struct radeon_clock_voltage_dependency_table *samu_table =
+               &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+       struct radeon_clock_voltage_dependency_table *acp_table =
+               &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
 
-       if (table->count) {
-               for (i = 0; i < table->count; i++)
-                       table->entries[i].v =
+       if (uvd_table->count) {
+               for (i = 0; i < uvd_table->count; i++)
+                       uvd_table->entries[i].v =
                                kv_convert_8bit_index_to_voltage(rdev,
-                                                                table->entries[i].v);
+                                                                uvd_table->entries[i].v);
+       }
+
+       if (vce_table->count) {
+               for (i = 0; i < vce_table->count; i++)
+                       vce_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                vce_table->entries[i].v);
+       }
+
+       if (samu_table->count) {
+               for (i = 0; i < samu_table->count; i++)
+                       samu_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                samu_table->entries[i].v);
+       }
+
+       if (acp_table->count) {
+               for (i = 0; i < acp_table->count; i++)
+                       acp_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                acp_table->entries[i].v);
        }
 
 }
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
                        break;
        }
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
        else
                return kv_set_enabled_level(rdev, i);
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
                        break;
        }
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
        else
                return kv_set_enabled_level(rdev, i);
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
        else
                pi->battery_state = false;
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                ps->dpm0_pg_nb_ps_lo = 0x1;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x1;
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev)
                                break;
 
                        kv_set_divider_value(rdev, i, table->entries[i].clk);
-                       vid_2bit = sumo_convert_vid7_to_vid2(rdev,
-                                                            &pi->sys_info.vid_mapping_table,
-                                                            table->entries[i].v);
+                       vid_2bit = kv_convert_vid7_to_vid2(rdev,
+                                                          &pi->sys_info.vid_mapping_table,
+                                                          table->entries[i].v);
                        kv_set_vid(rdev, i, vid_2bit);
                        kv_set_at(rdev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev,
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 nbdpmconfig1;
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return;
 
        if (pi->sys_info.nb_dpm_enable) {
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev)
 
         pi->sram_end = SMC_RAM_END;
 
-       if (rdev->family == CHIP_KABINI)
-               pi->high_voltage_t = 4001;
-
        pi->enable_nb_dpm = true;
 
        pi->caps_power_containment = true;
index 6e887d004ebad7041e2080af850cc6ac2a12367d..bbc189fd3ddc47f57993689cac1dd08f2b5a9c69 100644 (file)
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
        u32 hdmi0, hdmi1;
-       u32 d1grph = 0, d2grph = 0;
        u32 dma_cntl;
        u32 thermal_int = 0;
 
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(DMA_CNTL, dma_cntl);
        WREG32(DxMODE_INT_MASK, mode_int);
-       WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
-       WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+       WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+       WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
        if (ASIC_IS_DCE3(rdev)) {
                WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3918,6 +3918,14 @@ restart_ih:
                                break;
                        }
                        break;
+               case 9: /* D1 pflip */
+                       DRM_DEBUG("IH: D1 flip\n");
+                       radeon_crtc_handle_flip(rdev, 0);
+                       break;
+               case 11: /* D2 pflip */
+                       DRM_DEBUG("IH: D2 flip\n");
+                       radeon_crtc_handle_flip(rdev, 1);
+                       break;
                case 19: /* HPD/DAC hotplug */
                        switch (src_data) {
                        case 0:
index 53fcb28f5578d76919fa182488b32be8a1c6278a..4969cef44a1911b706e933fb8252397ddd893785 100644 (file)
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index b58e1afdda7697e9a2e094c3f07bacc8a1621699..8149e7cf430330095da34f98d27f842832c2c91c 100644 (file)
@@ -730,6 +730,12 @@ struct cik_irq_stat_regs {
        u32 disp_int_cont4;
        u32 disp_int_cont5;
        u32 disp_int_cont6;
+       u32 d1grph_int;
+       u32 d2grph_int;
+       u32 d3grph_int;
+       u32 d4grph_int;
+       u32 d5grph_int;
+       u32 d6grph_int;
 };
 
 union radeon_irq_stat_regs {
@@ -1636,6 +1642,7 @@ struct radeon_vce {
        unsigned                fb_version;
        atomic_t                handles[RADEON_MAX_VCE_HANDLES];
        struct drm_file         *filp[RADEON_MAX_VCE_HANDLES];
+       unsigned                img_size[RADEON_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
 };
 
@@ -1649,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
@@ -2634,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+                            (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
                              (rdev->ddev->pdev->device == 0x6850) || \
index b8a24a75d4fff0e48e60b1f078f33e36c5209d94..be20e62dac83c5b6d96bcfc2b44a9140691b7bb8 100644 (file)
@@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                rdev->asic = &kv_asic;
                /* set num crtcs */
                if (rdev->family == CHIP_KAVERI) {
index b3633d9a531703a1cd4189c061a0907e89ee1085..9ab30976287d4c27e0dc94e0cadfa993e6ec77cc 100644 (file)
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                }
        }
 
+       if (!found) {
+               while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+                       dhandle = ACPI_HANDLE(&pdev->dev);
+                       if (!dhandle)
+                               continue;
+
+                       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
        if (!found)
                return false;
 
index 511fe26198e4a3e5a779af5e7f82bf62be570da6..0e770bbf7e29d723457b5d58b83b7f3278f003d5 100644 (file)
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = {
        "KAVERI",
        "KABINI",
        "HAWAII",
+       "MULLINS",
        "LAST",
 };
 
index 8d99d5ee8014c4f23e031a31cbeab85bde33d81a..f00dbbf4d806511a86b034a73c2d68adac971b99 100644 (file)
@@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
        u32 update_pending;
        int vpos, hpos;
 
+       /* can happen during initialization */
+       if (radeon_crtc == NULL)
+               return;
+
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
        work = radeon_crtc->unpin_work;
        if (work == NULL ||
@@ -826,14 +830,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
 
        /* make sure nominator is large enough */
         if (*nom < nom_min) {
-               tmp = (nom_min + *nom - 1) / *nom;
+               tmp = DIV_ROUND_UP(nom_min, *nom);
                *nom *= tmp;
                *den *= tmp;
        }
 
        /* make sure the denominator is large enough */
        if (*den < den_min) {
-               tmp = (den_min + *den - 1) / *den;
+               tmp = DIV_ROUND_UP(den_min, *den);
                *nom *= tmp;
                *den *= tmp;
        }
@@ -858,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
                                 unsigned *fb_div, unsigned *ref_div)
 {
        /* limit reference * post divider to a maximum */
-       ref_div_max = min(210 / post_div, ref_div_max);
+       ref_div_max = min(128 / post_div, ref_div_max);
 
        /* get matching reference and feedback divider */
        *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -993,6 +997,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        /* this also makes sure that the reference divider is large enough */
        avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
 
+       /* avoid high jitter with small fractional dividers */
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+               if (fb_div < fb_div_min) {
+                       unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+                       fb_div *= tmp;
+                       ref_div *= tmp;
+               }
+       }
+
        /* and finally save the result */
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
                *fb_div_p = fb_div / 10;
index 9da5da4ffd176907d2b3f9b077e883cd9ab89323..4b7b87f71a6371308a64e78c7289a4e28117ef5f 100644 (file)
@@ -97,6 +97,7 @@ enum radeon_family {
        CHIP_KAVERI,
        CHIP_KABINI,
        CHIP_HAWAII,
+       CHIP_MULLINS,
        CHIP_LAST,
 };
 
index 0cc47f12d9957d916b41acf4d3874fa062313175..eaaedba0467595aaced591c6d70666c75ac97211 100644 (file)
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return r;
                }
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
-               }
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
 
-               /* map the ib pool buffer read only into
-                * virtual address space */
-               bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                        rdev->ring_tmp_bo.bo);
-               r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-                                         RADEON_VM_PAGE_READABLE |
-                                         RADEON_VM_PAGE_SNOOPED);
+                       /* map the ib pool buffer read only into
+                        * virtual address space */
+                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+                                                rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                                 RADEON_VM_PAGE_READABLE |
+                                                 RADEON_VM_PAGE_SNOOPED);
 
-               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
+                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
                }
-
                file_priv->driver_priv = fpriv;
        }
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                struct radeon_bo_va *bo_va;
                int r;
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (!r) {
-                       bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                 rdev->ring_tmp_bo.bo);
-                       if (bo_va)
-                               radeon_vm_bo_rmv(rdev, bo_va);
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (!r) {
+                               bo_va = radeon_vm_bo_find(&fpriv->vm,
+                                                         rdev->ring_tmp_bo.bo);
+                               if (bo_va)
+                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       }
                }
 
                radeon_vm_fini(rdev, &fpriv->vm);
index 19bec0dbfa38bf052db75cb52401adfcb29ac986..4faa4d6f9bb4f0616e0575d916069b47fa9389ed 100644 (file)
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                       if ((lobj->alt_domain & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.start << PAGE_SHIFT;
-               if ((offset + size) > rdev->mc.visible_vram_size) {
-                       /* hurrah the memory is not visible ! */
-                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
-                       if (unlikely(r != 0))
-                               return r;
-                       offset = bo->mem.start << PAGE_SHIFT;
-                       /* this should not happen */
-                       if ((offset + size) > rdev->mc.visible_vram_size)
-                               return -EINVAL;
-               }
+       if (bo->mem.mem_type != TTM_PL_VRAM)
+               return 0;
+
+       size = bo->mem.num_pages << PAGE_SHIFT;
+       offset = bo->mem.start << PAGE_SHIFT;
+       if ((offset + size) <= rdev->mc.visible_vram_size)
+               return 0;
+
+       /* hurrah the memory is not visible ! */
+       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       if (unlikely(r == -ENOMEM)) {
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               return ttm_bo_validate(bo, &rbo->placement, false, false);
+       } else if (unlikely(r != 0)) {
+               return r;
        }
+
+       offset = bo->mem.start << PAGE_SHIFT;
+       /* this should never happen */
+       if ((offset + size) > rdev->mc.visible_vram_size)
+               return -EINVAL;
+
        return 0;
 }
 
index 6fac8efe8340e2e99f83965201bab056de2997de..53d6e1bb48dc326bb3487fde5f02749df56d4e57 100644 (file)
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set profile when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set method when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               count = -EINVAL;
+               goto fail;
+       }
+
        /* we don't support the legacy modes with dpm */
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
                count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set dpm state when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
        enum radeon_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (rdev->asic->pm.get_temperature)
                temp = radeon_get_temperature(rdev);
        else
@@ -1300,6 +1336,7 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_KABINI:
        case CHIP_KAVERI:
        case CHIP_HAWAII:
+       case CHIP_MULLINS:
                /* DPM requires the RLC, RV770+ dGPU requires SMC */
                if (!rdev->rlc_fw)
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1613,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_device *ddev = rdev->ddev;
 
-       if (rdev->pm.dpm_enabled) {
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (rdev->pm.dpm_enabled) {
                mutex_lock(&rdev->pm.mutex);
                if (rdev->asic->dpm.debugfs_print_current_performance_level)
                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
index 58d12938c0b80bf022bc8c5490a8cb660041fa5d..4e7c3269b183644ea87c4879d764eac7fb246e80 100644 (file)
@@ -52,6 +52,7 @@
 #define BONAIRE_RLC_UCODE_SIZE       2048
 #define KB_RLC_UCODE_SIZE            2560
 #define KV_RLC_UCODE_SIZE            2560
+#define ML_RLC_UCODE_SIZE            2560
 
 /* MC */
 #define BTC_MC_UCODE_SIZE            6024
index 5748bdaeacceb2f593bab46448ba5bf49fb940ae..1b65ae2433cd0e4063a546cb9806f6d917ba83a9 100644 (file)
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
        case CHIP_KABINI:
        case CHIP_KAVERI:
        case CHIP_HAWAII:
+       case CHIP_MULLINS:
                fw_name = FIRMWARE_BONAIRE;
                break;
 
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
        cmd = radeon_get_ib_value(p, p->idx) >> 1;
 
        if (cmd < 0x4) {
+               if (end <= start) {
+                       DRM_ERROR("invalid reloc offset %X!\n", offset);
+                       return -EINVAL;
+               }
                if ((end - start) < buf_sizes[cmd]) {
                        DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
                                  (unsigned)(end - start), buf_sizes[cmd]);
index ced53dd03e7c1f12449850c1eaa2d2568a552750..3971d968af6c0d86d0ba6e6f08f94d26714e17ec 100644 (file)
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
        case CHIP_BONAIRE:
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                fw_name = FIRMWARE_BONAIRE;
                break;
 
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                       unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       uint64_t offset;
+       struct radeon_cs_reloc *reloc;
+       uint64_t start, end, offset;
        unsigned idx;
 
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -461,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+       p->ib.ptr[lo] = start & 0xFFFFFFFF;
+       p->ib.ptr[hi] = start >> 32;
+
+       if (end <= start) {
+               DRM_ERROR("invalid reloc offset %llX!\n", offset);
+               return -EINVAL;
+       }
+       if ((end - start) < size) {
+               DRM_ERROR("buffer to small (%d / %d)!\n",
+                       (unsigned)(end - start), size);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * we we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+       unsigned i;
+
+       /* validate the handle */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+                       return i;
+       }
+
+       /* handle not found try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+                       p->rdev->vce.filp[i] = p->filp;
+                       p->rdev->vce.img_size[i] = 0;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
+       int session_idx = -1;
+       bool destroyed = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
        int i, r;
 
        while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       return -EINVAL;
+               }
+
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
+                       session_idx = radeon_vce_validate_handle(p, handle);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->rdev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       break;
+
                case 0x01000001: // create
+                       *size = radeon_get_ib_value(p, p->idx + 8) *
+                               radeon_get_ib_value(p, p->idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x03000001: // encode
-                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+                                               *size);
                        if (r)
                                return r;
 
-                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+                                               *size / 3);
                        if (r)
                                return r;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               *size * 2);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = radeon_get_ib_value(p, p->idx + 4);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               tmp);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               4096);
                        if (r)
                                return r;
                        break;
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       return -EINVAL;
+               }
+
                p->idx += len / 4;
        }
 
-       if (destroy) {
+       if (destroyed) {
                /* IB contains a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-               return 0;
-        }
-
-       /* create or encode, validate the handle */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-                       return 0;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-                       p->rdev->vce.filp[i] = p->filp;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free VCE handles!\n");
-       return -EINVAL;
+       return 0;
 }
 
 /**
index 2aae6ce49d3286888ea22347b60d71cc5220eef4..d9ab99f47612743bb41361a8cca8114999e52fca 100644 (file)
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        ndw = 64;
 
        /* assume the worst case */
-       ndw += vm->max_pde_used * 12;
+       ndw += vm->max_pde_used * 16;
 
        /* update too big for an IB */
        if (ndw > 0xfffff)
index aca8cbe8a335dce1d13487c60e13f3193630fec7..bbf2e076ee457816924a736c57d2192cabba88ac 100644 (file)
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index ac708e006180d3467cfe0478c2484eeb64a2df68..22a63c98ba14c688ab259fa666fe3e5d111fb792 100644 (file)
@@ -5780,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev)
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
-       u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 dma_cntl, dma_cntl1;
        u32 thermal_int = 0;
 
@@ -5919,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev)
        }
 
        if (rdev->num_crtc >= 2) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 4) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 6) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
 
        if (!ASIC_IS_NODCE(rdev)) {
@@ -6292,6 +6297,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
index cf0fdad8c278ef6921bbab3677c2cd596bcf1575..de0ca070122f62ee0c8b7fcc6d8db0360adfb948 100644 (file)
@@ -213,6 +213,7 @@ int si_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index 683532f849311d1ca19de3daccb168b368d4c0b5..7321283602ce0c1d8429193efe4221ba26a33761 100644 (file)
 #define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
 #define                SPLL_SW_DIR_CONTROL                     (1 << 0)
-#      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
-#      define SPLL_REFCLK_SEL_MASK                     0xFF00
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
 
 #define        CG_SPLL_SPREAD_SPECTRUM                         0x620
 #define                SSEN                                    (1 << 0)
index 0a243f0e5d6889129fff88d3f5a3656450dd14cf..be42c8125203b22bd62d9fc4be8510672ba577b3 100644 (file)
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
        int r;
 
        /* raise clocks while booting up the VCPU */
-       radeon_set_uvd_clocks(rdev, 53300, 40000);
+       if (rdev->family < CHIP_RV740)
+               radeon_set_uvd_clocks(rdev, 10000, 10000);
+       else
+               radeon_set_uvd_clocks(rdev, 53300, 40000);
 
        r = uvd_v1_0_start(rdev);
        if (r)
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        struct radeon_fence *fence = NULL;
        int r;
 
-       r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+       if (rdev->family < CHIP_RV740)
+               r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+       else
+               r = radeon_set_uvd_clocks(rdev, 53300, 40000);
        if (r) {
                DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
                return r;
index 10a2c08664596a3c43d1687cab911ad3fc3ab970..da52279de939652c9f1cffc1f60cf4308ce795ea 100644 (file)
@@ -1253,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report);
 
 static int hid_report_len(struct hid_report *report)
 {
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
 }
 
 /*
@@ -1266,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
         * of implement() working on 8 byte chunks
         */
 
-       int len = hid_report_len(report);
+       int len = hid_report_len(report) + 7;
 
        return kmalloc(len, flags);
 }
index c8af7202c28da3027e73cfd0a9d8b95d6036dbe1..34bb2205d2ea21bed4593d2b50a3f9e9e3e5b52f 100644 (file)
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 
+#define USB_VENDOR_ID_ELITEGROUP       0x03fc
+#define USB_DEVICE_ID_ELITEGROUP_05D8  0x05d8
+
 #define USB_VENDOR_ID_ELO              0x04E7
 #define USB_DEVICE_ID_ELO_TS2515       0x0022
 #define USB_DEVICE_ID_ELO_TS2700       0x0020
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+
+#define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
+#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
 
 #define USB_VENDOR_ID_THINGM           0x27b8
 #define USB_DEVICE_ID_BLINK1           0x01ed
index 35278e43c7a48d1999283c21f0f60cceccbc3b84..51e25b9407f259dfa219012aa9f28b274f764f27 100644 (file)
@@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
                        USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
 
+       /* Elitegroup panel */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+                       USB_DEVICE_ID_ELITEGROUP_05D8) },
+
        /* Flatfrog Panels */
        { .driver_data = MT_CLS_FLATFROG,
                MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
index af8244b1c1f428dab026544802ca951089a02fd1..be14b5690e942d5a1c47d9e2f45b706f804d2162 100644 (file)
@@ -708,6 +708,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
                        USB_DEVICE_ID_STM_HID_SENSOR),
                        .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
+                       USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
+                       .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
                     HID_ANY_ID) },
        { }
index dbd83878ff99ec029a1cda07b265ddcd27418710..8e4ddb369883257384a2d0fb315c4ab231eb486a 100644 (file)
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
 
        { 0, 0 }
 };
index bc196f49ec53be2e13959a9ebca9aa9c82edcfff..4af0da96c2e22a6d18474cf60e34649acc11167b 100644 (file)
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
        tristate "NTC thermistor support"
-       depends on (!OF && !IIO) || (OF && IIO)
+       depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
          interpretation. The driver can also monitor the temperature and
index 90ec1173b8a125c629542e079cac66872ea60ec4..01723f04fe45bfa449b60d07a05cd905cc9ccc98 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
        if (retval < 0)
                goto fail;
 
-       hyst = val - retval * 1000;
+       hyst = retval * 1000 - val;
        hyst = DIV_ROUND_CLOSEST(hyst, 1000);
        if (hyst < 0 || hyst > 255) {
                retval = -ERANGE;
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
        }
 
        id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
-       if (id != 0x01)
+       if (id < 0x01 || id > 0x04)
                return -ENODEV;
 
        return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
        if (id->driver_data)
                data->groups[1] = &emc1404_group;
 
-       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
-                                                     client->name, data,
-                                                     data->groups);
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          data->groups);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);
 
index 8a17f01e8672065d7099e4c3ae156f93705eade4..e76feb86a1d4c0b07ab773675ef1244b88ee9b98 100644 (file)
@@ -44,6 +44,7 @@ struct ntc_compensation {
        unsigned int    ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
        { "ncp15wb473", TYPE_NCPXXWB473 },
        { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
        char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
        struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
        { .compatible = "ntc,ncp15wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[1] },
        { .compatible = "ntc,ncp21wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[2] },
        { .compatible = "ntc,ncp03wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[3] },
        { .compatible = "ntc,ncp15wl333",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+               .data = &ntc_thermistor_id[4] },
        { },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        return NULL;
 }
 
+#define ntc_match      NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
index 22e92c3d3d07448cea9aa37fcc4ad6c612570e13..3c20e4bd6dd1380238df20f06941828ec84aadd0 100644 (file)
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
         */
        dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
 
+       /* enforce disabled interrupts (due to HW issues) */
+       i2c_dw_disable_int(dev);
+
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
index 28cbe1b2a2ec2958547b843d2f86d672fedb5e0b..32c85e9ecdaeb141f0653c3a7e8c85784a1140ce 100644 (file)
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 
        dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
                                resource_size(&adev->res));
-       if (IS_ERR(dev->virtbase)) {
+       if (!dev->virtbase) {
                ret = -ENOMEM;
                goto err_no_mem;
        }
index 1b4cf14f1106aac597b1ab0346ed848e2139ba22..2a5efb5b487cdc2e4fb8774e94167f6af53ca7d8 100644 (file)
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
        int ret, idx;
 
        ret = pm_runtime_get_sync(qup->dev);
-       if (ret)
+       if (ret < 0)
                goto out;
 
        writel(1, qup->base + QUP_SW_RESET);
index d4fa8eba6e9d2e40e11cb0ccba5ee64606dd8fff..06d47aafbb79c75a18a66e12964544a9a2b3496f 100644 (file)
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        ret = -EINVAL;
        for (i = 0; i < num; i++) {
+               /* This HW can't send STOP after address phase */
+               if (msgs[i].len == 0) {
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
                /*-------------- spin lock -----------------*/
                spin_lock_irqsave(&priv->lock, flags);
 
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 static u32 rcar_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       /* This HW can't do SMBUS_QUICK and NOSTART */
+       return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 }
 
 static const struct i2c_algorithm rcar_i2c_algo = {
index ae4491062e411a83a5729a1e267e2430f23f069c..bb3a9964f7e00c2b2c604e32490b0c5f015c4bfe 100644 (file)
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
        struct platform_device *pdev = to_platform_device(dev);
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
-       i2c->suspended = 0;
        clk_prepare_enable(i2c->clk);
        s3c24xx_i2c_init(i2c);
        clk_disable_unprepare(i2c->clk);
+       i2c->suspended = 0;
 
        return 0;
 }
index 1b6dbe156a3708692a743cd58fc3351d4b01e533..199c7896f08188ca40fa9cd30462f0631d387338 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC    ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+                              struct net_device *dev,
+                              int port)
+{
+       u64 new_smac = 0;
+       u64 release_mac = MLX4_IB_INVALID_MAC;
+       struct mlx4_ib_qp *qp;
+
+       read_lock(&dev_base_lock);
+       new_smac = mlx4_mac_to_u64(dev->dev_addr);
+       read_unlock(&dev_base_lock);
+
+       mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+       qp = ibdev->qp1_proxy[port - 1];
+       if (qp) {
+               int new_smac_index;
+               u64 old_smac = qp->pri.smac;
+               struct mlx4_update_qp_params update_params;
+
+               if (new_smac == old_smac)
+                       goto unlock;
+
+               new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+               if (new_smac_index < 0)
+                       goto unlock;
+
+               update_params.smac_index = new_smac_index;
+               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+                                  &update_params)) {
+                       release_mac = new_smac;
+                       goto unlock;
+               }
+
+               qp->pri.smac = new_smac;
+               qp->pri.smac_index = new_smac_index;
+
+               release_mac = old_smac;
+       }
+
+unlock:
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+       if (release_mac != MLX4_IB_INVALID_MAC)
+               mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
                                 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+                                struct net_device *dev,
+                                unsigned long event)
+
 {
        struct mlx4_ib_iboe *iboe;
+       int update_qps_port = -1;
        int port;
 
        iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
                }
                curr_master = iboe->masters[port - 1];
 
+               if (dev == iboe->netdevs[port - 1] &&
+                   (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+                    event == NETDEV_UP || event == NETDEV_CHANGE))
+                       update_qps_port = port;
+
                if (curr_netdev) {
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
        }
 
        spin_unlock(&iboe->lock);
+
+       if (update_qps_port > 0)
+               mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-       mlx4_ib_scan_netdevs(ibdev);
+       mlx4_ib_scan_netdevs(ibdev, dev, event);
 
        return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_map;
 
        for (i = 0; i < ibdev->num_ports; ++i) {
+               mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                for (i = 1 ; i <= ibdev->num_ports ; ++i)
                        reset_gid_table(ibdev, i);
                rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev);
+               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
                rtnl_unlock();
                mlx4_ib_init_gid_table(ibdev);
        }
index f589522fddfd9efa4e32fdd0a7e8f63e49a54927..66b0b7dbd9f41cac95cfb76b4e766249e1abd83b 100644 (file)
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
+       struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+       /* lock when destroying qp1_proxy and getting netdev events */
+       struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
 };
 
 struct ib_event_work {
index 41308af4163c3dc852adc23f983bdd86d279374e..dc57482ae7af2b2ed8935676a67b459847b3302c 100644 (file)
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+       if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+               mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+               dev->qp1_proxy[mqp->port - 1] = NULL;
+               mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+       }
+
        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
                                if (err)
                                        return -EINVAL;
+                               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+                                       dev->qp1_proxy[qp->port - 1] = qp;
                        }
                }
        }
index c98fdb185931644bd3ed92d337f85aa0d2517149..a1710465faaf2345ae9c88755790e3e74a6e76e8 100644 (file)
@@ -28,6 +28,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
 
 #include "isert_proto.h"
 #include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;
-       u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+       u8 pi_support;
+
+       spin_lock_bh(&np->np_thread_lock);
+       if (!np->enabled) {
+               spin_unlock_bh(&np->np_thread_lock);
+               pr_debug("iscsi_np is not enabled, reject connect request\n");
+               return rdma_reject(cma_id, NULL, 0);
+       }
+       spin_unlock_bh(&np->np_thread_lock);
 
        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_mr;
        }
 
+       pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
        if (pi_support && !device->pi_capable) {
                pr_err("Protection information requested but not supported\n");
                ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_conn_dev;
 
        mutex_lock(&isert_np->np_accept_mutex);
-       list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+       list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
 
-       pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
-       wake_up(&isert_np->np_accept_wq);
+       pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+       up(&isert_np->np_sem);
        return 0;
 
 out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
                pr_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
-       init_waitqueue_head(&isert_np->np_accept_wq);
+       sema_init(&isert_np->np_sem, 0);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
@@ -3047,18 +3057,6 @@ out:
        return ret;
 }
 
-static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
-       int empty;
-
-       mutex_lock(&isert_np->np_accept_mutex);
-       empty = list_empty(&isert_np->np_accept_list);
-       mutex_unlock(&isert_np->np_accept_mutex);
-
-       return empty;
-}
-
 static int
 isert_rdma_accept(struct isert_conn *isert_conn)
 {
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
        int max_accept = 0, ret;
 
 accept_wait:
-       ret = wait_event_interruptible(isert_np->np_accept_wq,
-                       !isert_check_accept_queue(isert_np) ||
-                       np->np_thread_state == ISCSI_NP_THREAD_RESET);
+       ret = down_interruptible(&isert_np->np_sem);
        if (max_accept > 5)
                return -ENODEV;
 
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+               pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);
index 4c072ae34c01a3021e57cb5378a3949b97d0876c..da6612e6800004b0984880d54ef57bcaab1b0be1 100644 (file)
@@ -182,7 +182,7 @@ struct isert_device {
 };
 
 struct isert_np {
-       wait_queue_head_t       np_accept_wq;
+       struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
        struct list_head        np_accept_list;
index 76842d7dc2e3dac1c8964e22141f0de8c20e1d39..ffc7ad3a2c881fefa35e2aeee4a243445a4aaa5f 100644 (file)
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86
+       select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
index 2626773ff29b956de97d5c62d383e36e13ee4868..2dd1d0dd4f7de03233752e57704ff60c17e6d992 100644 (file)
@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
 static void *atkbd_platform_fixup_data;
 static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
 
+/*
+ * Certain keyboards to not like ATKBD_CMD_RESET_DIS and stop responding
+ * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
+ */
+static bool atkbd_skip_deactivate;
+
 static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
                                ssize_t (*handler)(struct atkbd *, char *));
 static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
  * Make sure nothing is coming from the keyboard and disturbs our
  * internal state.
  */
-       atkbd_deactivate(atkbd);
+       if (!atkbd_skip_deactivate)
+               atkbd_deactivate(atkbd);
 
        return 0;
 }
@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
        return 1;
 }
 
+static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
+{
+       atkbd_skip_deactivate = true;
+       return 1;
+}
+
 static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
        {
                .matches = {
@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
                .callback = atkbd_setup_scancode_fixup,
                .driver_data = atkbd_oqo_01plus_scancode_fixup,
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
+               },
+               .callback = atkbd_deactivate_fixup,
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
+               },
+               .callback = atkbd_deactivate_fixup,
+       },
        { }
 };
 
index d8241ba0afa0737b5779371563c5623969931df5..a15063bea70020bc44b060a5e9aff8e8133de09e 100644 (file)
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
        unsigned short keycodes[MAX_KEYPAD_KEYS];
        int rotary_rel_code[2];
 
+       unsigned int row_shift;
+
        /* state row bits of each column scan */
        uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
        uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
                        if ((bits_changed & (1 << row)) == 0)
                                continue;
 
-                       code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+                       code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+
                        input_event(input_dev, EV_MSC, MSC_SCAN, code);
                        input_report_key(input_dev, keypad->keycodes[code],
                                         new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
                goto failed_put_clk;
        }
 
+       keypad->row_shift = get_count_order(pdata->matrix_key_cols);
+
        if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
            (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
                input_dev->evbit[0] |= BIT_MASK(EV_REL);
index 55c15304ddbce997a1027e319b81e7b3fc8ba98b..4e491c1762cfe5e1ecd9bd562e8244237261aa07 100644 (file)
@@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = {
        { }
 };
 MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+
+/*
+ * The device tree based i2c loader looks for
+ * "i2c:" + second_component_of(property("compatible"))
+ * and therefore we need an alias to be found.
+ */
+MODULE_ALIAS("i2c:tca8418");
 #endif
 
 static struct i2c_driver tca8418_keypad_driver = {
index 52d3a9b28f0b80a253eb04584016b767c90c22cc..b36831c828d3fe7a19be14872a7eabb2b8a38ecd 100644 (file)
@@ -70,6 +70,7 @@
 #define BMA150_CFG_5_REG       0x11
 
 #define BMA150_CHIP_ID         2
+#define BMA180_CHIP_ID         3
 #define BMA150_CHIP_ID_REG     BMA150_DATA_0_REG
 
 #define BMA150_ACC_X_LSB_REG   BMA150_DATA_2_REG
@@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client,
        }
 
        chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
-       if (chip_id != BMA150_CHIP_ID) {
+       if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
                dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
                return -EINVAL;
        }
@@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
 
 static const struct i2c_device_id bma150_id[] = {
        { "bma150", 0 },
+       { "bma180", 0 },
        { "smb380", 0 },
        { "bma023", 0 },
        { }
index effa9c5f2c5cc6043f0ca988261b02f516c85be6..6b8441f7bc3282595feaa1378e12d871943c13c9 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86
+       select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index 088d3541c7d3d4485380dd859e9777c6e39c94d2..b96e978a37b76a4a43ab15bf6b80e1a295a8767a 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/input.h>
@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
                break;
 
        case 3:
-               etd->reg_10 = 0x0b;
+               if (etd->set_hw_resolution)
+                       etd->reg_10 = 0x0b;
+               else
+                       etd->reg_10 = 0x03;
+
                if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
                        rc = -1;
 
@@ -1330,6 +1335,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
        return 0;
 }
 
+/*
+ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
+ */
+static const struct dmi_system_id no_hw_res_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               /* Gigabyte U2442 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
+               },
+       },
+#endif
+       { }
+};
+
 /*
  * determine hardware version and set some properties according to it.
  */
@@ -1390,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd)
         */
        etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
 
+       /* Enable real hardware resolution on hw_version 3 ? */
+       etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
+
        return 0;
 }
 
index 036a04abaef72314a8dcc7d444f6ab3feea377f9..9e0e2a1f340d52817fc18bf79d849bed6d340932 100644 (file)
@@ -130,6 +130,7 @@ struct elantech_data {
        bool jumpy_cursor;
        bool reports_pressure;
        bool crc_enabled;
+       bool set_hw_resolution;
        unsigned char hw_version;
        unsigned int fw_version;
        unsigned int single_finger_reports;
index ef9f4913450d12a06262dd906b8ebe93b0401dbe..c5ec703c727e11de8dd52297c0e8b7580003dab8 100644 (file)
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
 }
 
 #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+struct min_max_quirk {
+       const char * const *pnp_ids;
+       int x_min, x_max, y_min, y_max;
+};
+
+static const struct min_max_quirk min_max_pnpid_table[] = {
+       {
+               (const char * const []){"LEN0033", NULL},
+               1024, 5052, 2258, 4832
+       },
+       {
+               (const char * const []){"LEN0035", "LEN0042", NULL},
+               1232, 5710, 1156, 4696
+       },
+       {
+               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               1024, 5112, 2024, 4832
+       },
+       {
+               (const char * const []){"LEN2001", NULL},
+               1024, 5022, 2508, 4832
+       },
+       { }
+};
+
 /* This list has been kindly provided by Synaptics. */
 static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN002D",
        "LEN002E",
        "LEN0033", /* Helix */
-       "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+       "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
        "LEN0035", /* X240 */
        "LEN0036", /* T440 */
        "LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0048",
        "LEN0049",
        "LEN2000",
-       "LEN2001",
+       "LEN2001", /* Edge E431 */
        "LEN2002",
        "LEN2003",
        "LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
        NULL
 };
 
+static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+       int i;
+
+       if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+               for (i = 0; ids[i]; i++)
+                       if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+                               return true;
+
+       return false;
+}
+
 /*****************************************************************************
  *     Synaptics communications functions
  ****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Resolution is left zero if touchpad does not support the query
  */
 
-static const int *quirk_min_max;
-
 static int synaptics_resolution(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        unsigned char resp[3];
+       int i;
 
-       if (quirk_min_max) {
-               priv->x_min = quirk_min_max[0];
-               priv->x_max = quirk_min_max[1];
-               priv->y_min = quirk_min_max[2];
-               priv->y_max = quirk_min_max[3];
-               return 0;
-       }
+       for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
+               if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+                       priv->x_min = min_max_pnpid_table[i].x_min;
+                       priv->x_max = min_max_pnpid_table[i].x_max;
+                       priv->y_min = min_max_pnpid_table[i].y_min;
+                       priv->y_max = min_max_pnpid_table[i].y_max;
+                       return 0;
+               }
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
                return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
 
        if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
-               /* See if this buttonpad has a top button area */
-               if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
-                       for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
-                               if (strstr(psmouse->ps2dev.serio->firmware_id,
-                                          topbuttonpad_pnp_ids[i])) {
-                                       __set_bit(INPUT_PROP_TOPBUTTONPAD,
-                                                 dev->propbit);
-                                       break;
-                               }
-                       }
-               }
+               if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+                       __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
                /* Clickpads report only left button */
                __clear_bit(BTN_RIGHT, dev->keybit);
                __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,96 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
        { }
 };
 
-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI)
-       {
-               /* Lenovo ThinkPad Helix */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
-               },
-               .driver_data = (int []){1024, 5052, 2258, 4832},
-       },
-       {
-               /* Lenovo ThinkPad X240 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
-               },
-               .driver_data = (int []){1232, 5710, 1156, 4696},
-       },
-       {
-               /* Lenovo ThinkPad T431s */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad T440s */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad L440 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad T540p */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
-               },
-               .driver_data = (int []){1024, 5056, 2058, 4832},
-       },
-       {
-               /* Lenovo ThinkPad L540 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo Yoga S1 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
-                                       "ThinkPad S1 Yoga"),
-               },
-               .driver_data = (int []){1232, 5710, 1156, 4696},
-       },
-       {
-               /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION,
-                                       "ThinkPad X1 Carbon 2nd"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-#endif
-       { }
-};
-
 void __init synaptics_module_init(void)
 {
-       const struct dmi_system_id *min_max_dmi;
-
        impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
        broken_olpc_ec = dmi_check_system(olpc_dmi_table);
-
-       min_max_dmi = dmi_first_match(min_max_dmi_table);
-       if (min_max_dmi)
-               quirk_min_max = min_max_dmi->driver_data;
 }
 
 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
index 762b08432de003dc155a7d78dce5572a3e352b33..8b748d99b934f4baa32674f074f8e43c153350b4 100644 (file)
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
        writeb(divisor, KMICLKDIV);
        writeb(KMICR_EN, KMICR);
 
-       ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
+       ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
+                         kmi);
        if (ret) {
                printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
                writeb(0, KMICR);
index 68edc9db2c6446c7534a0a5708d1b01e2c5ced98..b845e9370871bfdc0038ad8202b1d49bdddef121 100644 (file)
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
 
 config TOUCHSCREEN_WM97XX_ATMEL
        tristate "WM97xx Atmel accelerated touch"
-       depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+       depends on TOUCHSCREEN_WM97XX && AVR32
        help
          Say Y here for support for streaming mode with WM97xx touchscreens
          on Atmel AT91 or AVR32 systems with an AC97C module.
index c949520bd196ec47cf6e572e68cdd67c11b9f84f..57068e8035b5b2553141cb8f1a6ad27f334ea6ec 100644 (file)
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
        iommu_flush_dte(iommu, devid);
        if (devid != alias) {
                irq_lookup_table[alias] = table;
-               set_dte_irq_entry(devid, table);
+               set_dte_irq_entry(alias, table);
                iommu_flush_dte(iommu, alias);
        }
 
index b76c58dbe30ce5ac38c8422b66b5ed2ea4f0fb1e..0e08545d72989e114f016751f68df5a3191088c0 100644 (file)
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
-               set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+               set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
index 5208828792e603ad3f5effaa22b2e9c09ca0ac02..203b2e6a91cfca1184eb37df8761b339bc2a3728 100644 (file)
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
 
        write = !!(fault->flags & PPR_FAULT_WRITE);
 
+       down_read(&fault->state->mm->mmap_sem);
        npages = get_user_pages(fault->state->task, fault->state->mm,
                                fault->address, 1, write, 0, &page, NULL);
+       up_read(&fault->state->mm->mmap_sem);
 
        if (npages == 1) {
                put_page(page);
index 9380be7b18954b9308ed42abe5fafa2f87c0f76a..5f054c44b485f27dec03bb5bed4c13e95dff18db 100644 (file)
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->num_discard_bios = 1;
        ti->discards_supported = true;
        ti->discard_zeroes_data_unsupported = true;
+       /* Discard bios must be split on a block boundary */
+       ti->split_discard_bios = true;
 
        cache->features = ca->features;
        ti->per_bio_data_size = get_per_bio_data_size(cache);
index 784695d22fde1acaaf11acd78c7263438c04648e..53b213226c015ae29cd636c033a0c088776029aa 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
+       struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-       struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
        struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
        sector_t iv_offset;
        unsigned int iv_size;
 
-       /*
-        * Duplicated per cpu state. Access through
-        * per_cpu_ptr() only.
-        */
-       struct crypt_cpu __percpu *cpu;
-
        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-       return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-       if (!this_cc->req)
-               this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+       if (!ctx->req)
+               ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-       ablkcipher_request_set_callback(this_cc->req,
+       ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+       ablkcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-           kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+           kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        int r;
 
        atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
                atomic_inc(&ctx->cc_pending);
 
-               r = crypt_convert_block(cc, ctx, this_cc->req);
+               r = crypt_convert_block(cc, ctx, ctx->req);
 
                switch (r) {
                /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
                        reinit_completion(&ctx->restart);
                        /* fall through*/
                case -EINPROGRESS:
-                       this_cc->req = NULL;
+                       ctx->req = NULL;
                        ctx->cc_sector++;
                        continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
        io->sector = sector;
        io->error = 0;
        io->base_io = NULL;
+       io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 
        return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        if (!atomic_dec_and_test(&io->io_pending))
                return;
 
+       if (io->ctx.req)
+               mempool_free(io->ctx.req, cc->req_pool);
        mempool_free(io, cc->io_pool);
 
        if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
        struct crypt_config *cc = ti->private;
-       struct crypt_cpu *cpu_cc;
-       int cpu;
 
        ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);
 
-       if (cc->cpu)
-               for_each_possible_cpu(cpu) {
-                       cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-                       if (cpu_cc->req)
-                               mempool_free(cpu_cc->req, cc->req_pool);
-               }
-
        crypt_free_tfms(cc);
 
        if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->dev)
                dm_put_device(ti, cc->dev);
 
-       if (cc->cpu)
-               free_percpu(cc->cpu);
-
        kzfree(cc->cipher);
        kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");
 
-       cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-                                __alignof__(struct crypt_cpu));
-       if (!cc->cpu) {
-               ti->error = "Cannot allocate per cpu state";
-               goto bad_mem;
-       }
-
        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
index aa009e86587189a20a4ccf4734ad9d999cb5701f..ebfa411d1a7d4b0193596b515f7086639057b5d0 100644 (file)
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
        else
                m->saved_queue_if_no_path = queue_if_no_path;
        m->queue_if_no_path = queue_if_no_path;
-       if (!m->queue_if_no_path)
-               dm_table_run_md_queue_async(m->ti->table);
-
        spin_unlock_irqrestore(&m->lock, flags);
 
+       if (!queue_if_no_path)
+               dm_table_run_md_queue_async(m->ti->table);
+
        return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-       int r = 0;
+       int r = 0, run_queue = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
        if (!m->nr_valid_paths++) {
                m->current_pgpath = NULL;
-               dm_table_run_md_queue_async(m->ti->table);
+               run_queue = 1;
        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                        m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);
+       if (run_queue)
+               dm_table_run_md_queue_async(m->ti->table);
 
        return r;
 }
index 13abade76ad9bbd65c83c67d35f075506b17b63f..242ac2ea5f295c0bf2ad2db85fdd86812936851d 100644 (file)
@@ -27,6 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");
@@ -175,6 +178,7 @@ struct pool {
        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;
+       struct delayed_work no_space_timeout;
 
        unsigned long last_commit_jiffies;
        unsigned ref_count;
@@ -935,7 +939,7 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) != PM_WRITE)
+       if (get_pool_mode(pool) >= PM_READ_ONLY)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1594,20 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+       struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+                                        no_space_timeout);
+
+       if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+               set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
 
 struct noflush_work {
@@ -1654,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
        struct pool_c *pt = pool->ti->private;
        bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
        enum pool_mode old_mode = get_pool_mode(pool);
+       unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
        /*
         * Never allow the pool to transition to PM_WRITE mode if user
@@ -1715,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                pool->process_discard = process_discard;
                pool->process_prepared_mapping = process_prepared_mapping;
                pool->process_prepared_discard = process_prepared_discard_passdown;
+
+               if (!pool->pf.error_if_no_space && no_space_timeout)
+                       queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
                break;
 
        case PM_WRITE:
@@ -2100,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
+       INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
        spin_lock_init(&pool->lock);
        bio_list_init(&pool->deferred_flush_bios);
        INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti)
        struct pool *pool = pt->pool;
 
        cancel_delayed_work(&pool->waker);
+       cancel_delayed_work(&pool->no_space_timeout);
        flush_workqueue(pool->wq);
        (void) commit(pool);
 }
@@ -3487,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
index 8fda38d23e3847aa4d96ecd147e996514a5a4af7..237b7e0ddc7ae2617af41cb1fb9dc0cc195f44ab 100644 (file)
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this,
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
-                       mddev->safemode = 2;
+                       if (mddev->persistent)
+                               mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
index 33fc408e5eacef0a1dce55fd5c0d578fc244b663..cb882aae9e20d4f7032a400884f45f50de57528f 100644 (file)
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 
        md_write_start(mddev, bio);
 
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
 
        do {
 
index e8a1ce204036f45cca1d1d495f29c8c7b72942d5..cdd7c1b7259b008e873cc711294a3f0cecba422a 100644 (file)
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
         * windows that fall outside that.
         */
        for (i = 0; i < n_win_sizes; i++) {
-               struct ov7670_win_size *win = &info->devtype->win_sizes[index];
+               struct ov7670_win_size *win = &info->devtype->win_sizes[i];
                if (info->min_width && win->width < info->min_width)
                        continue;
                if (info->min_height && win->height < info->min_height)
index a4459301b5f829efcae8563241ef1108933194d7..ee0f57e01b5677df58c9f74565767bcaf57d54af 100644 (file)
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
        if (ret < 0)
                return -EINVAL;
 
-       node_ep = v4l2_of_get_next_endpoint(node, NULL);
+       node_ep = of_graph_get_next_endpoint(node, NULL);
        if (!node_ep) {
                dev_warn(dev, "no endpoint defined for node: %s\n",
                                                node->full_name);
index d5a7a135f75d39d5bc2f6fb3366639dd6dc7ee34..703560fa5e73b456cbc0951ccb61957bdfecebac 100644 (file)
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
        struct media_entity *ent;
        struct media_entity_desc u_ent;
 
+       memset(&u_ent, 0, sizeof(u_ent));
        if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
                return -EFAULT;
 
index b4f12d00be059c3f381b31295d435074ee291dab..65670825296209917cc33546d1d43abb60af5ed5 100644 (file)
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
 {
        struct vpbe_fh *fh = vb2_get_drv_priv(vq);
        struct vpbe_layer *layer = fh->layer;
+       struct vpbe_display *disp = fh->disp_dev;
+       unsigned long flags;
 
        if (!vb2_is_streaming(vq))
                return 0;
 
        /* release all active buffers */
+       spin_lock_irqsave(&disp->dma_queue_lock, flags);
+       if (layer->cur_frm == layer->next_frm) {
+               vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (layer->cur_frm != NULL)
+                       vb2_buffer_done(&layer->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (layer->next_frm != NULL)
+                       vb2_buffer_done(&layer->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&layer->dma_queue)) {
                layer->next_frm = list_entry(layer->dma_queue.next,
                                                struct vpbe_disp_buffer, list);
                list_del(&layer->next_frm->list);
                vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
        }
-
+       spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
        return 0;
 }
 
index d762246eabf5a3b78a8c348e7a5e5f149580e143..0379cb9f9a9c25c7b0a4efb52e741d4723ddcdc4 100644 (file)
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
                }
                vpfe_dev->io_usrs = 0;
                vpfe_dev->numbuffers = config_params.numbuffers;
+               videobuf_stop(&vpfe_dev->buffer_queue);
+               videobuf_mmap_free(&vpfe_dev->buffer_queue);
        }
 
        /* Decrement device usrs counter */
index 756da78bac23109dbd3e7da464243da381354231..8dea0b84a3ad66788ab29b2437303ebd3bc38d6f 100644 (file)
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel as per its device type and channel id */
+       if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+               enable_channel0(0);
+               channel0_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel1(0);
+               channel1_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel as per its device type and channel id */
-               if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
-                       enable_channel0(0);
-                       channel0_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel1(0);
-                       channel1_intr_enable(0);
-               }
-               common->started = 0;
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index 0ac841e35aa48dd59bdf17b32f97afbe94b561b5..aed41edd050102e89248cecbb2607a4f45e6eb12 100644 (file)
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel */
+       if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+               enable_channel2(0);
+               channel2_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel3(0);
+               channel3_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel */
-               if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
-                       enable_channel2(0);
-                       channel2_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel3(0);
-                       channel3_intr_enable(0);
-               }
-               common->started = 0;
-
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index da2fc86cc52433bd8f1c6b32a898baa87c2a1064..25dbf5b05a96186527cf520f568f03721b181a80 100644 (file)
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
        }, {
                .name           = "YUV 4:2:2 planar, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV422P,
-               .depth          = { 12 },
+               .depth          = { 16 },
                .color          = FIMC_FMT_YCBYCR422,
                .memplanes      = 1,
                .colplanes      = 3,
index 3aecaf4650942429eba75ee89cc33180aac4ae07..f0c9c42867de2e8cad570229f8592a6b445f1b47 100644 (file)
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
 
        f_ref = 2UL * priv->cfg->clock / r_val;
        n_val = div_u64_rem(f_vco, f_ref, &k_val);
-       k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+       k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
 
        ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
        if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
        if (ret < 0)
                goto err;
 
-       ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
-                       fc2580_if_filter_lut[i].mul / 1000000000);
+       ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+                       fc2580_if_filter_lut[i].mul, 1000000000));
        if (ret < 0)
                goto err;
 
index be38a9e637e08d20f673cb0c8de85fea608442d8..646c994521361ba7579466d814a8e7d2422397b5 100644 (file)
@@ -22,6 +22,7 @@
 #define FC2580_PRIV_H
 
 #include "fc2580.h"
+#include <linux/math64.h>
 
 struct fc2580_reg_val {
        u8 reg;
index 7407b8338ccfa33ce6a4179e5b9e99632a3f6ebf..bc38f03394cda0e1a397b390f354b620fe486a73 100644 (file)
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
 ccflags-y += -I$(srctree)/drivers/media/common
-ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
index 61d196e8b3abde6dc0d97e26fe3ca7cae9292957..dcbd392e6efc8f38265d8b7fd805c4b38eff9410 100644 (file)
@@ -24,7 +24,6 @@
 
 #include "rtl2830.h"
 #include "rtl2832.h"
-#include "rtl2832_sdr.h"
 
 #include "qt1010.h"
 #include "mt2060.h"
 #include "tua9001.h"
 #include "r820t.h"
 
+/*
+ * RTL2832_SDR module is in staging. That logic is added in order to avoid any
+ * hard dependency to drivers/staging/ directory as we want compile mainline
+ * driver even whole staging directory is missing.
+ */
+#include <media/v4l2-subdev.h>
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd)
+{
+       return NULL;
+}
+#endif
+
+#ifdef CONFIG_MEDIA_ATTACH
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       void *__r = NULL; \
+       typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+       if (__a) { \
+               __r = (void *) __a(ARGS); \
+               if (__r == NULL) \
+                       symbol_put(FUNCTION); \
+       } \
+       __r; \
+})
+
+#else
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       FUNCTION(ARGS); \
+})
+
+#endif
+
 static int rtl28xxu_disable_rc;
 module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
 MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0012_config, NULL);
                break;
        case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0013_config, NULL);
                break;
        case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                        i2c_set_adapdata(i2c_adap_internal, d);
 
                        /* attach SDR */
-                       dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+                       dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
                                        i2c_adap_internal,
                                        &rtl28xxu_rtl2832_e4000_config, sd);
                }
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_r820t_config, NULL);
                break;
        case TUNER_RTL2832_R828D:
index 7277dbd2afcdb8c88629aafc2c9013b38cd79390..ecbcb39feb71ad21f7b765c36ac20858234d01f6 100644 (file)
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
        {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
        {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !IS_ENABLED(CONFIG_USB_SN9C102)
        {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
        {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
-#endif
        {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
        {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
        {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
index 04b2daf567bec232d384337491543151d890c69a..7e2411c36419c394b3d23a81eb5bdd4b5fee88c2 100644 (file)
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
 
 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+       if (get_user(kp->type, &up->type))
+               return -EFAULT;
+
        switch (kp->type) {
        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
 
 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
-                       get_user(kp->type, &up->type))
-                       return -EFAULT;
+       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+               return -EFAULT;
        return __get_v4l2_format32(kp, up);
 }
 
 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
        if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
-                       return -EFAULT;
+           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+               return -EFAULT;
        return __get_v4l2_format32(&kp->format, &up->format);
 }
 
index 110c03627051cb749ef92d853e47322714b1dd51..b59a17fb7c3e3f3eff43faecac3610f58df1dbd8 100644 (file)
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
                        node->full_name);
                return err;
        }
-       /* Convert bit width to byte width */
-       r.bus_width /= 8;
+
+       /*
+        * The bus width is encoded into the register as 0 for 8 bits,
+        * and 1 for 16 bits, so we do the necessary conversion here.
+        */
+       if (r.bus_width == 8)
+               r.bus_width = 0;
+       else if (r.bus_width == 16)
+               r.bus_width = 1;
+       else {
+               dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+               return -EINVAL;
+       }
 
        err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
                                 &r.badr_skew);
index c9de3d598ea515279ff0f79f1214984b041266db..1d15735f9ef930ed18e384b1c63b7deb1fd42981 100644 (file)
@@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
                int num_sg, bool read, int timeout)
 {
        struct completion trans_done;
-       int err = 0, count;
+       u8 dir;
+       int err = 0, i, count;
        long timeleft;
        unsigned long flags;
+       struct scatterlist *sg;
+       enum dma_data_direction dma_dir;
+       u32 val;
+       dma_addr_t addr;
+       unsigned int len;
+
+       dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+
+       /* don't transfer data during abort processing */
+       if (pcr->remove_pci)
+               return -EINVAL;
+
+       if ((sglist == NULL) || (num_sg <= 0))
+               return -EINVAL;
 
-       count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+       if (read) {
+               dir = DEVICE_TO_HOST;
+               dma_dir = DMA_FROM_DEVICE;
+       } else {
+               dir = HOST_TO_DEVICE;
+               dma_dir = DMA_TO_DEVICE;
+       }
+
+       count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
        if (count < 1) {
                dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
                return -EINVAL;
        }
        dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
 
+       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+       pcr->sgi = 0;
+       for_each_sg(sglist, sg, count, i) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+               rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+       }
 
        spin_lock_irqsave(&pcr->lock, flags);
 
        pcr->done = &trans_done;
        pcr->trans_result = TRANS_NOT_READY;
        init_completion(&trans_done);
+       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 
        spin_unlock_irqrestore(&pcr->lock, flags);
 
-       rtsx_pci_dma_transfer(pcr, sglist, count, read);
-
        timeleft = wait_for_completion_interruptible_timeout(
                        &trans_done, msecs_to_jiffies(timeout));
        if (timeleft <= 0) {
@@ -383,7 +413,7 @@ out:
        pcr->done = NULL;
        spin_unlock_irqrestore(&pcr->lock, flags);
 
-       rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
 
        if ((err < 0) && (err != -ENODEV))
                rtsx_pci_stop_cmd(pcr);
@@ -395,73 +425,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if ((sglist == NULL) || num_sg < 1)
-               return -EINVAL;
-
-       return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
-
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if (sglist == NULL || num_sg < 1)
-               return -EINVAL;
-
-       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-       return num_sg;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
-
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int sg_count, bool read)
-{
-       struct scatterlist *sg;
-       dma_addr_t addr;
-       unsigned int len;
-       int i;
-       u32 val;
-       u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
-       unsigned long flags;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if ((sglist == NULL) || (sg_count < 1))
-               return -EINVAL;
-
-       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
-       pcr->sgi = 0;
-       for_each_sg(sglist, sg, sg_count, i) {
-               addr = sg_dma_address(sg);
-               len = sg_dma_len(sg);
-               rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
-       }
-
-       spin_lock_irqsave(&pcr->lock, flags);
-
-       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
-       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
-
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
-
 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 {
        int err;
@@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
        int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
        /* Clear interrupt flag */
        rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
-       dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
-
        if ((int_reg & pcr->bier) == 0) {
                spin_unlock(&pcr->lock);
                return IRQ_NONE;
@@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
        }
 
        if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
-               if (int_reg & (TRANS_FAIL_INT | DELINK_INT))
+               if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
                        pcr->trans_result = TRANS_RESULT_FAIL;
-               else if (int_reg & TRANS_OK_INT)
+                       if (pcr->done)
+                               complete(pcr->done);
+               } else if (int_reg & TRANS_OK_INT) {
                        pcr->trans_result = TRANS_RESULT_OK;
-
-               if (pcr->done)
-                       complete(pcr->done);
-
-               if (int_reg & SD_EXIST) {
-                       struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-                       if (slot && slot->done_transfer)
-                               slot->done_transfer(slot->p_dev);
-               }
-
-               if (int_reg & MS_EXIST) {
-                       struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-                       if (slot && slot->done_transfer)
-                               slot->done_transfer(slot->p_dev);
+                       if (pcr->done)
+                               complete(pcr->done);
                }
        }
 
-
        if (pcr->card_inserted || pcr->card_removed)
                schedule_delayed_work(&pcr->carddet_work,
                                msecs_to_jiffies(200));
index 5fb994f9a653570d75f2b3deb6aab32129d17503..0b9ded13a3ae89d72a94e7e8e075f62496483f0e 100644 (file)
 #include <linux/mfd/rtsx_pci.h>
 #include <asm/unaligned.h>
 
-struct realtek_next {
-       unsigned int    sg_count;
-       s32             cookie;
-};
-
 struct realtek_pci_sdmmc {
        struct platform_device  *pdev;
        struct rtsx_pcr         *pcr;
        struct mmc_host         *mmc;
        struct mmc_request      *mrq;
-       struct mmc_command      *cmd;
-       struct mmc_data         *data;
-
-       spinlock_t              lock;
-       struct timer_list       timer;
-       struct tasklet_struct   cmd_tasklet;
-       struct tasklet_struct   data_tasklet;
-       struct tasklet_struct   finish_tasklet;
-
-       u8                      rsp_type;
-       u8                      rsp_len;
-       int                     sg_count;
+
+       struct mutex            host_mutex;
+
        u8                      ssc_depth;
        unsigned int            clock;
        bool                    vpclk;
@@ -62,13 +48,8 @@ struct realtek_pci_sdmmc {
        int                     power_state;
 #define SDMMC_POWER_ON         1
 #define SDMMC_POWER_OFF                0
-
-       struct realtek_next     next_data;
 };
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-               struct mmc_request *mrq);
-
 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
 {
        return &(host->pdev->dev);
@@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
 #define sd_print_debug_regs(host)
 #endif /* DEBUG */
 
-static void sd_isr_done_transfer(struct platform_device *pdev)
-{
-       struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
-
-       spin_lock(&host->lock);
-       if (host->cmd)
-               tasklet_schedule(&host->cmd_tasklet);
-       if (host->data)
-               tasklet_schedule(&host->data_tasklet);
-       spin_unlock(&host->lock);
-}
-
-static void sd_request_timeout(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (!host->mrq) {
-               dev_err(sdmmc_dev(host), "error: no request exist\n");
-               goto out;
-       }
-
-       if (host->cmd)
-               host->cmd->error = -ETIMEDOUT;
-       if (host->data)
-               host->data->error = -ETIMEDOUT;
-
-       dev_dbg(sdmmc_dev(host), "timeout for request\n");
-
-out:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sd_finish_request(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_request *mrq;
-       struct mmc_command *cmd;
-       struct mmc_data *data;
-       unsigned long flags;
-       bool any_error;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       del_timer(&host->timer);
-       mrq = host->mrq;
-       if (!mrq) {
-               dev_err(sdmmc_dev(host), "error: no request need finish\n");
-               goto out;
-       }
-
-       cmd = mrq->cmd;
-       data = mrq->data;
-
-       any_error = (mrq->sbc && mrq->sbc->error) ||
-               (mrq->stop && mrq->stop->error) ||
-               (cmd && cmd->error) || (data && data->error);
-
-       if (any_error) {
-               rtsx_pci_stop_cmd(pcr);
-               sd_clear_error(host);
-       }
-
-       if (data) {
-               if (any_error)
-                       data->bytes_xfered = 0;
-               else
-                       data->bytes_xfered = data->blocks * data->blksz;
-
-               if (!data->host_cookie)
-                       rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
-                                       data->flags & MMC_DATA_READ);
-
-       }
-
-       host->mrq = NULL;
-       host->cmd = NULL;
-       host->data = NULL;
-
-out:
-       spin_unlock_irqrestore(&host->lock, flags);
-       mutex_unlock(&pcr->pcr_mutex);
-       mmc_request_done(host->mmc, mrq);
-}
-
 static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
                u8 *buf, int buf_len, int timeout)
 {
@@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
        return 0;
 }
 
-static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+               struct mmc_command *cmd)
 {
        struct rtsx_pcr *pcr = host->pcr;
        u8 cmd_idx = (u8)cmd->opcode;
@@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
        int err = 0;
        int timeout = 100;
        int i;
+       u8 *ptr;
+       int stat_idx = 0;
        u8 rsp_type;
        int rsp_len = 5;
-       unsigned long flags;
-
-       if (host->cmd)
-               dev_err(sdmmc_dev(host), "error: cmd already exist\n");
-
-       host->cmd = cmd;
+       bool clock_toggled = false;
 
        dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
                        __func__, cmd_idx, arg);
@@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                err = -EINVAL;
                goto out;
        }
-       host->rsp_type = rsp_type;
-       host->rsp_len = rsp_len;
 
        if (rsp_type == SD_RSP_TYPE_R1b)
                timeout = 3000;
@@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                                0xFF, SD_CLK_TOGGLE_EN);
                if (err < 0)
                        goto out;
+
+               clock_toggled = true;
        }
 
        rtsx_pci_init_cmd(pcr);
@@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                /* Read data from ping-pong buffer */
                for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
                        rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+               stat_idx = 16;
        } else if (rsp_type != SD_RSP_TYPE_R0) {
                /* Read data from SD_CMDx registers */
                for (i = SD_CMD0; i <= SD_CMD4; i++)
                        rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+               stat_idx = 5;
        }
 
        rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
 
-       mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
-
-       spin_lock_irqsave(&pcr->lock, flags);
-       pcr->trans_result = TRANS_NOT_READY;
-       rtsx_pci_send_cmd_no_wait(pcr);
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       return;
-
-out:
-       cmd->error = err;
-       tasklet_schedule(&host->finish_tasklet);
-}
-
-static void sd_get_rsp(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_command *cmd;
-       int i, err = 0, stat_idx;
-       u8 *ptr, rsp_type;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       cmd = host->cmd;
-       host->cmd = NULL;
-
-       if (!cmd) {
-               dev_err(sdmmc_dev(host), "error: cmd not exist\n");
+       err = rtsx_pci_send_cmd(pcr, timeout);
+       if (err < 0) {
+               sd_print_debug_regs(host);
+               sd_clear_error(host);
+               dev_dbg(sdmmc_dev(host),
+                       "rtsx_pci_send_cmd error (err = %d)\n", err);
                goto out;
        }
 
-       spin_lock(&pcr->lock);
-       if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       else if (pcr->trans_result != TRANS_RESULT_OK)
-               err = -EINVAL;
-       spin_unlock(&pcr->lock);
-
-       if (err < 0)
-               goto out;
-
-       rsp_type = host->rsp_type;
-       stat_idx = host->rsp_len;
-
        if (rsp_type == SD_RSP_TYPE_R0) {
                err = 0;
                goto out;
@@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr)
                                cmd->resp[0]);
        }
 
-       if (cmd == host->mrq->sbc) {
-               sd_send_cmd(host, host->mrq->cmd);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
-       }
-
-       if (cmd == host->mrq->stop)
-               goto out;
-
-       if (cmd->data) {
-               sd_start_multi_rw(host, host->mrq);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
-       }
-
 out:
        cmd->error = err;
 
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
-                       struct mmc_data *data, struct realtek_next *next)
-{
-       struct rtsx_pcr *pcr = host->pcr;
-       int read = data->flags & MMC_DATA_READ;
-       int sg_count = 0;
-
-       if (!next && data->host_cookie &&
-               data->host_cookie != host->next_data.cookie) {
-               dev_err(sdmmc_dev(host),
-                       "error: invalid cookie data[%d] host[%d]\n",
-                       data->host_cookie, host->next_data.cookie);
-               data->host_cookie = 0;
-       }
-
-       if (next || (!next && data->host_cookie != host->next_data.cookie))
-               sg_count = rtsx_pci_dma_map_sg(pcr,
-                               data->sg, data->sg_len, read);
-       else
-               sg_count = host->next_data.sg_count;
-
-       if (next) {
-               next->sg_count = sg_count;
-               if (++next->cookie < 0)
-                       next->cookie = 1;
-               data->host_cookie = next->cookie;
-       }
-
-       return sg_count;
-}
-
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               bool is_first_req)
-{
-       struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-       struct mmc_data *data = mrq->data;
-
-       if (data->host_cookie) {
-               dev_err(sdmmc_dev(host),
-                       "error: descard already cookie data[%d]\n",
-                       data->host_cookie);
-               data->host_cookie = 0;
-       }
-
-       dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
-               sd_pre_dma_transfer(host, data, &host->next_data));
-}
-
-static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               int err)
-{
-       struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_data *data = mrq->data;
-       int read = data->flags & MMC_DATA_READ;
-
-       rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
-       data->host_cookie = 0;
+       if (err && clock_toggled)
+               rtsx_pci_write_register(pcr, SD_BUS_STAT,
+                               SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
 }
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-               struct mmc_request *mrq)
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
 {
        struct rtsx_pcr *pcr = host->pcr;
        struct mmc_host *mmc = host->mmc;
        struct mmc_card *card = mmc->card;
        struct mmc_data *data = mrq->data;
        int uhs = mmc_card_uhs(card);
-       int read = data->flags & MMC_DATA_READ;
+       int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
        u8 cfg2, trans_mode;
        int err;
        size_t data_len = data->blksz * data->blocks;
 
-       if (host->data)
-               dev_err(sdmmc_dev(host), "error: data already exist\n");
-
-       host->data = data;
-
        if (read) {
                cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
                        SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
        rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
                        SD_TRANSFER_END, SD_TRANSFER_END);
 
-       mod_timer(&host->timer, jiffies + 10 * HZ);
        rtsx_pci_send_cmd_no_wait(pcr);
 
-       err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read);
-       if (err < 0) {
-               data->error = err;
-               tasklet_schedule(&host->finish_tasklet);
-       }
-       return 0;
-}
-
-static void sd_finish_multi_rw(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_data *data;
-       int err = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (!host->data) {
-               dev_err(sdmmc_dev(host), "error: no data exist\n");
-               goto out;
-       }
-
-       data = host->data;
-       host->data = NULL;
-
-       if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       else if (pcr->trans_result != TRANS_RESULT_OK)
-               err = -EINVAL;
-
+       err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
        if (err < 0) {
-               data->error = err;
-               goto out;
-       }
-
-       if (!host->mrq->sbc && data->stop) {
-               sd_send_cmd(host, data->stop);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
+               sd_clear_error(host);
+               return err;
        }
 
-out:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
+       return 0;
 }
 
 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
@@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
        return 0;
 }
 
-static inline bool sd_use_muti_rw(struct mmc_command *cmd)
-{
-       return mmc_op_multi(cmd->opcode) ||
-               (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
-               (cmd->opcode == MMC_WRITE_BLOCK);
-}
-
 static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        struct mmc_data *data = mrq->data;
        unsigned int data_size = 0;
        int err;
-       unsigned long flags;
-
-       mutex_lock(&pcr->pcr_mutex);
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (host->mrq)
-               dev_err(sdmmc_dev(host), "error: request already exist\n");
-       host->mrq = mrq;
 
        if (host->eject) {
                cmd->error = -ENOMEDIUM;
@@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                goto finish;
        }
 
+       mutex_lock(&pcr->pcr_mutex);
+
        rtsx_pci_start_run(pcr);
 
        rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
                        CARD_SHARE_MASK, CARD_SHARE_48_SD);
 
+       mutex_lock(&host->host_mutex);
+       host->mrq = mrq;
+       mutex_unlock(&host->host_mutex);
+
        if (mrq->data)
                data_size = data->blocks * data->blksz;
 
-       if (sd_use_muti_rw(cmd))
-               host->sg_count = sd_pre_dma_transfer(host, data, NULL);
+       if (!data_size || mmc_op_multi(cmd->opcode) ||
+                       (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+                       (cmd->opcode == MMC_WRITE_BLOCK)) {
+               sd_send_cmd_get_rsp(host, cmd);
 
-       if (!data_size || sd_use_muti_rw(cmd)) {
-               if (mrq->sbc)
-                       sd_send_cmd(host, mrq->sbc);
-               else
-                       sd_send_cmd(host, cmd);
-               spin_unlock_irqrestore(&host->lock, flags);
+               if (!cmd->error && data_size) {
+                       sd_rw_multi(host, mrq);
+
+                       if (mmc_op_multi(cmd->opcode) && mrq->stop)
+                               sd_send_cmd_get_rsp(host, mrq->stop);
+               }
        } else {
-               spin_unlock_irqrestore(&host->lock, flags);
                sd_normal_rw(host, mrq);
-               tasklet_schedule(&host->finish_tasklet);
        }
-       return;
+
+       if (mrq->data) {
+               if (cmd->error || data->error)
+                       data->bytes_xfered = 0;
+               else
+                       data->bytes_xfered = data->blocks * data->blksz;
+       }
+
+       mutex_unlock(&pcr->pcr_mutex);
 
 finish:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
+       if (cmd->error)
+               dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+
+       mutex_lock(&host->host_mutex);
+       host->mrq = NULL;
+       mutex_unlock(&host->host_mutex);
+
+       mmc_request_done(mmc, mrq);
 }
 
 static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1400,8 +1141,6 @@ out:
 }
 
 static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
-       .pre_req = sdmmc_pre_req,
-       .post_req = sdmmc_post_req,
        .request = sdmmc_request,
        .set_ios = sdmmc_set_ios,
        .get_ro = sdmmc_get_ro,
@@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
        struct realtek_pci_sdmmc *host;
        struct rtsx_pcr *pcr;
        struct pcr_handle *handle = pdev->dev.platform_data;
-       unsigned long host_addr;
 
        if (!handle)
                return -ENXIO;
@@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
        pcr->slots[RTSX_SD_CARD].p_dev = pdev;
        pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
 
-       host_addr = (unsigned long)host;
-       host->next_data.cookie = 1;
-       setup_timer(&host->timer, sd_request_timeout, host_addr);
-       tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
-       tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
-       tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
-       spin_lock_init(&host->lock);
+       mutex_init(&host->host_mutex);
 
-       pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
        realtek_init_host(host);
 
        mmc_add_host(mmc);
@@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
        struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
        struct rtsx_pcr *pcr;
        struct mmc_host *mmc;
-       struct mmc_request *mrq;
-       unsigned long flags;
 
        if (!host)
                return 0;
@@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
        pcr = host->pcr;
        pcr->slots[RTSX_SD_CARD].p_dev = NULL;
        pcr->slots[RTSX_SD_CARD].card_event = NULL;
-       pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
        mmc = host->mmc;
-       mrq = host->mrq;
 
-       spin_lock_irqsave(&host->lock, flags);
+       mutex_lock(&host->host_mutex);
        if (host->mrq) {
                dev_dbg(&(pdev->dev),
                        "%s: Controller removed during transfer\n",
                        mmc_hostname(mmc));
 
-               if (mrq->sbc)
-                       mrq->sbc->error = -ENOMEDIUM;
-               if (mrq->cmd)
-                       mrq->cmd->error = -ENOMEDIUM;
-               if (mrq->stop)
-                       mrq->stop->error = -ENOMEDIUM;
-               if (mrq->data)
-                       mrq->data->error = -ENOMEDIUM;
+               rtsx_pci_complete_unfinished_transfer(pcr);
 
-               tasklet_schedule(&host->finish_tasklet);
+               host->mrq->cmd->error = -ENOMEDIUM;
+               if (host->mrq->stop)
+                       host->mrq->stop->error = -ENOMEDIUM;
+               mmc_request_done(mmc, host->mrq);
        }
-       spin_unlock_irqrestore(&host->lock, flags);
-
-       del_timer_sync(&host->timer);
-       tasklet_kill(&host->cmd_tasklet);
-       tasklet_kill(&host->data_tasklet);
-       tasklet_kill(&host->finish_tasklet);
+       mutex_unlock(&host->host_mutex);
 
        mmc_remove_host(mmc);
        host->eject = true;
index 4615d79fc93f795c869687117744e01653219478..b922c8efcf4012376548c15e4a3af50562d47095 100644 (file)
@@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = {
 #if defined(CONFIG_OF)
 static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
+       {.compatible = "ti,keystone-nand", },
        {},
 };
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
@@ -581,6 +582,11 @@ static struct davinci_nand_pdata
                    of_property_read_bool(pdev->dev.of_node,
                        "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+               if (of_device_is_compatible(pdev->dev.of_node,
+                                           "ti,keystone-nand")) {
+                       pdata->options |= NAND_NO_SUBPAGE_WRITE;
+               }
        }
 
        return dev_get_platdata(&pdev->dev);
index 9f69e818b0009db7881b3f8c862393836e5a604b..93580a47cc548851a36d1ff4cb45e88636565a3e 100644 (file)
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
 {
        struct learning_pkt pkt;
        struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->dev = slave->dev;
 
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
 
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
        /* fasten the change in the switch */
        if (SLAVE_IS_OK(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
        }
 
        if (SLAVE_IS_OK(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
+
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating current_active, use all currently
+                        * used mac addresses (!strict_match).  Otherwise, only
+                        * use mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
 
        write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
index 69aff72c895716fe6c579d2bf7f46c79ddca2a36..d3a67896d43541f0b8ebc0b4e193db945087bc56 100644 (file)
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
+       int i;
 
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
 
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
        arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+{
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+
+       return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
 
+               if (ret)
+                       goto found;
+
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
 }
 
index 724e30fa20b9fa70166b5d9b25ed9029fab6db73..832070298446458483cdf1132cc46b63ba323a7d 100644 (file)
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
index b8bdd0acc8f334ac97bca2ddfea602f473c3272f..00bea320e3b50c1eaa75a712f7afbbe65ac146fb 100644 (file)
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
index 8ab7103d4f44ea616ae8cb945eda8ea84aa3059b..61ffc12d8fd8e4e01056b06fd3ae73be7abe513e 100644 (file)
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
          SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
          boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-       bool "Force a strict RX CAN frame order (may cause frame loss)"
-       ---help---
-         The RX split buffer prevents packet reordering but can cause packet
-         loss. Only enable this option when you accept to lose CAN frames
-         in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
        tristate "Generic PCI Bus based C_CAN/D_CAN driver"
        depends on PCI
index a2ca820b5373841d2083d8d5de02fa9fce6c0ecf..95e04e2002daec774d394d0f0912ca7e952f280f 100644 (file)
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
-#endif
                c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
-#endif
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
-#endif
        return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
index c540e3d12e3d826260dbb590e8045c92fa4efb2b..564933ae218c78848dfba1e166f219e9de994e79 100644 (file)
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
 
        /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
index 39b26fe28d1051ff916faceb747da7a64dac711f..d7401017a3f10940f3a662bebc555d835be3ce4b 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for EtherCAT master module located on CCAT FPGA
+         that can be found on Beckhoff CX5020, and possibly other
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3b9cb422b2fefa7122b074cd869a9085c2..35190e36c4568e6803279f878a6aa866685ca1be 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index d4a187e453698bbe96589921f97d5ea38bc3d5fb..3eff2fd3997e36ef128983fcaf080f595a693715 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
index 4d1f2fdd5c3275c3c9952cd118bda99b544ebd55..0fb986ba32905a5dac78c926333c9765ec4678de 100644 (file)
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+               msgdma_csroffs(status));
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&rxcsr->status,
+               if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Rx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
        /* Reset Tx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+               msgdma_csroffs(status));
+
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&txcsr->status,
+               if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Tx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-       iowrite32(0, &desc->write_addr_lo);
-       iowrite32(0, &desc->write_addr_hi);
-       iowrite32(buffer->len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_lo));
+       csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_hi));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+       csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+               msgdma_descroffs(stride));
+       csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+               msgdma_descroffs(control));
        return 0;
 }
 
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
-       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+       inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+                       & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
                ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        } else {
                /* Check for buffered last packet */
-               status = ioread32(&txcsr->status);
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
                if (status & MSGDMA_CSR_STAT_BUSY)
                        ready = priv->tx_prod - priv->tx_cons - 1;
                else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
-       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
        u32 len = priv->rx_dma_buf_sz;
        dma_addr_t dma_addr = rxbuffer->dma_addr;
        u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        | MSGDMA_DESC_CTL_TR_ERR_IRQ
                        | MSGDMA_DESC_CTL_GO);
 
-       iowrite32(0, &desc->read_addr_lo);
-       iowrite32(0, &desc->read_addr_hi);
-       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-       iowrite32(len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(0x00010001, &desc->stride);
-       iowrite32(control, &desc->control);
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+       csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_lo));
+       csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_hi));
+       csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+       csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
-
-       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-               pktlength = ioread32(&rxresp->bytes_transferred);
-               pktstatus = ioread32(&rxresp->status);
+
+       if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+           & 0xffff) {
+               pktlength = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(bytes_transferred));
+               pktstatus = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(status));
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
index d7b59ba4019c1fe1914b2b5c0643e464fcdff62d..e335626e1b6b5288c4f3d4cc8f45d79dcd56655d 100644 (file)
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-       u32 read_addr;  /* data buffer source address */
-       u32 write_addr; /* data buffer destination address */
-       u32 len;        /* the number of bytes to transfer per descriptor */
-       u32 control;    /* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
        u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM BIT(8)
index 9ce8630692b6e7c3a380745ec77afe0e005f1723..99cc56f451cf4886fbf0a5bca044dfa5cf033a2c 100644 (file)
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
 
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
 
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        }
 
        /* Initialize descriptor memory to all 0's, sync memory to cache */
-       memset(priv->tx_dma_desc, 0, priv->txdescmem);
-       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
 
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
 
-       u32 sts = ioread32(&csr->status);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);
 
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
                if (rxstatus) {
-                       desc->status = 0;
+                       csrwr8(0, desc, sgdma_descroffs(status));
 
                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                                            "sgdma rx and rx queue empty!\n");
 
                        /* Clear control */
-                       iowrite32(0, &csr->control);
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
-                       iowrite32(0xf, &csr->status);
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
                        /* kick the rx sgdma after reaping this descriptor */
-                       pktsrx = sgdma_async_read(priv);
+                       sgdma_async_read(priv);
 
                } else {
                        /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
-                                  sts, desc->status, rxstatus);
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
                }
        } else if (sts == 0) {
-               pktsrx = sgdma_async_read(priv);
+               sgdma_async_read(priv);
        }
 
        return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
 
        /* Channel is implicitly zero, initialized to 0 by default */
-
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        struct tse_buffer *rxbuffer = NULL;
 
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);
 
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
 
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
 
                return 1;
        }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
        if (sgdma_txbusy(priv))
                return 0;
 
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
 
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
 
        return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish it's current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
        /* if DMA is busy, wait for current transactino to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
 
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
index ba3334f353836cd0441d537ba5fda8a28bea171e..85bc33b218d946f557647d1a73f9699e3189827b 100644 (file)
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-       unsigned int    raddr; /* address of data to be read */
-       unsigned int    pad1;
-       unsigned int    waddr;
-       unsigned int    pad2;
-       unsigned int    next;
-       unsigned int    pad3;
-       unsigned short  bytes;
-       unsigned char   rburst;
-       unsigned char   wburst;
-       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+       u32     raddr; /* address of data to be read */
+       u32     pad1;
+       u32     waddr;
+       u32     pad2;
+       u32     next;
+       u32     pad3;
+       u16     bytes;
+       u8      rburst;
+       u8      wburst;
+       u16     bytes_xferred;  /* 16 bits, bytes xferred */
 
        /* bit 0: error
         * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
-       unsigned char   status;
+       u8      status;
 
        /* bit 0: eop
         * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
-       unsigned char   control;
+       u8      control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
        u32     pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR       BIT(0) /* Error */
 #define SGDMA_STSREG_EOP       BIT(1) /* EOP */
index 465c4aabebbd49d299cb266544c3469acd0f3b6b..2adb24d4523c915d3b7d87f1294ead36757cea50 100644 (file)
@@ -357,6 +357,8 @@ struct altera_tse_mac {
        u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
index 76133caffa78eb3b1e385525fdfc7fda142e77e1..54c25eff795272661e2caabfd554ffcc7dd24e00 100644 (file)
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
 
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
 
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
 
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
 
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
 
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
 
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
 
        /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs->version = 1;
 
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index e44a4aeb970142a6622d77c52b30a27c58a6e786..7330681574d20ccd02855beabedc9b01c8ae2b48 100644 (file)
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-       u32 data;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* get the data */
-       data = ioread32(&mdio_regs[regnum]) & 0xffff;
-       return data;
+       return csrrd32(priv->mac_dev,
+                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* write the data */
-       iowrite32((u32) value, &mdio_regs[regnum]);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
        return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio->irq[i] = PHY_POLL;
 
-       mdio->priv = priv->mac_dev;
+       mdio->priv = dev;
        mdio->parent = priv->device;
 
        ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;
-       int txcomplete = 0;
 
        spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(priv->device, buffer->dma_addr,
                                   buffer->len, DMA_TO_DEVICE);
 
-       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+       priv->dmaops->tx_buffer(priv, buffer);
 
        skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-       int ret;
 
        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
                        netdev_err(dev, "Could not attach to PHY\n");
 
        } else {
+               int ret;
                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        u32 msb;
        u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
        /* Set primary MAC address */
-       iowrite32(msb, &mac->mac_addr_0);
-       iowrite32(lsb, &mac->mac_addr_1);
+       csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+       csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
        int counter;
        u32 dat;
 
-       dat = ioread32(cmd_cfg_reg);
+       dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-       iowrite32(dat, cmd_cfg_reg);
+       csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+               if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+                                    MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }
 
        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               dat = ioread32(cmd_cfg_reg);
+               dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
-               iowrite32(dat, cmd_cfg_reg);
+               csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        unsigned int cmd = 0;
        u32 frm_length;
 
        /* Setup Rx FIFO */
-       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-                 &mac->rx_section_empty);
-       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+       csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(rx_section_empty));
+
+       csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(rx_section_full));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(rx_almost_empty));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(rx_almost_full));
 
        /* Setup Tx FIFO */
-       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-                 &mac->tx_section_empty);
-       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+       csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(tx_section_empty));
+
+       csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(tx_section_full));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(tx_almost_empty));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(tx_almost_full));
 
        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);
 
        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-       iowrite32(frm_length, &mac->frm_length);
-       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+       csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+       csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+               tse_csroffs(tx_ipg_length));
 
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+       tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+                   ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+       tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+                     ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                     ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
-       cmd = ioread32(&mac->command_config);
+       cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
        cmd &= ~MAC_CMDCFG_ETH_SPEED;
        cmd &= ~MAC_CMDCFG_ENA_10;
 
-       iowrite32(cmd, &mac->command_config);
+       csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-       iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+       csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+               tse_csroffs(pause_quanta));
 
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
-       u32 value = ioread32(&mac->command_config);
+       u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-       iowrite32(value, &mac->command_config);
+       csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
        struct netdev_hw_addr *ha;
 
        /* clear the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(0, &(mac->hash_table[i]));
+               csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
                        hash = (hash << 1) | xor_bit;
                }
-               iowrite32(1, &(mac->hash_table[hash]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
 }
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
 
        /* set the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(1, &(mac->hash_table[i]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if (dev->flags & IFF_PROMISC)
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
 
        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
        else
-               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+                             MAC_CMDCFG_PROMIS_EN);
 
        spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");
 
+       /* Set hash filter to not set for now until the
+        * multicast filter receive issue is debugged
+        */
+       priv->hash_filter = 0;
+
        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
        .start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
index 70fa13f486b2fc4ef1d6fc45bb82977d13ea1930..d7eeb1713ad2b85721a533fbcc469d22db5740ea 100644 (file)
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value |= bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value &= ~bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 0 : 1;
 }
index ce1db36d35832a974f9f958f0b57c298343c0967..baf100ccf5872c7c18ac365ddfe314308b78c9b5 100644 (file)
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
index b260913db23609ba34daf1f0e7d2165a7c2cc867..3b0d43154e677bfe8fd1f66aac7d3c0a6afbcfd5 100644 (file)
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
index 81cc2d9831c2192edcf414fa90dd4aa5962285aa..b8078d50261bf0944f456acc466c73c138b03306 100644 (file)
@@ -2695,7 +2695,7 @@ out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
-       return 0;
+       return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
index 0c067e8564dd4e15e43a7e22f3e064091234b99a..784c7155b98a1977f2b809efe4ee5a45b23dc44b 100644 (file)
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644 (file)
index 0000000..4884205
--- /dev/null
@@ -0,0 +1,706 @@
+ /*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for EtherCAT master module present on CCAT FPGA.
+ * Those can be found on Bechhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void * __iomem io;
+       void * __iomem dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void * __iomem ec_io;
+       void * __iomem fifo_io;
+       void * __iomem mii_io;
+       void * __iomem mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate a skb_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned block_count, i;
+       void * __iomem ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that update updates to tx_dnext are perceived
+                * by timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - is of size 2^mask bytes (at most)
+        * In order to ensure that we will allocate buffer of
+        * 2 * 2^mask bytes.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_len);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void * __iomem dma_io;
+       void * __iomem io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (net_dev == 0) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
index a18645407d2152b43353a50b76ccf6317ef90151..dc19bc5dec7732c02220e009ad08ffc393aa1c2c 100644 (file)
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
+
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
index b0c6050479eb460ae306cccaa93926f738e64c2e..b78378cea5e39b6e18627b9f39ac5249c40293fd 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = 0 ; j < count ; j++) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+                               ctxbi->mapping = 0;
+                               ctxbi->len = 0;
+       }
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, i);
+                       goto out;
+               }
+
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, i);
+
+out:
+       return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_OK;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
index 78099eab767374319c7e258bfa1f0d6df4c64fa3..92d3249f63f19a71b5ab9ed2e661cb4a8048134a 100644 (file)
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
index f9c46510196341a6089b0a23d7b53455dad69ae5..212cea440f90c73424d777f94c71fe61a0c48543 100644 (file)
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
index 61d64ebffd56e64b0fa8bf2d0fb69308e3d02c49..fbd32af89c7c0c2b94a36faec71f4e004048050d 100644 (file)
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
index 1c3fdd4a1f7df3fe84847ae7ff278d652db13463..8f1254a79832c24b96b227de0c30ba3ed0116520 100644 (file)
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+{
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
+
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+
+err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
index 7b52a88923ef2e53fadf9aca0185e2af492aa7be..f785d01c7d123dde875a774ebf1027800af33425 100644 (file)
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
index 0bc914859e38be2c52e070edb82061fa2a6a1e48..7e55e88a81bf26ede0928225fa85998254280195 100644 (file)
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change Tx/Rx rings size
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
index 32d969e857f7befc79bf4a6f18cb153c350b374b..89b83e59e1dc601898ddd60bd0fa704fdd7b6d43 100644 (file)
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-       /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx)
-               free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-       /* Disable legacy interrupt */
-       if (efx->legacy_irq)
+       if (EFX_INT_MODE_USE_MSI(efx)) {
+               /* Disable MSI/MSI-X interrupts */
+               efx_for_each_channel(channel, efx)
+                       free_irq(channel->irq,
+                                &efx->msi_context[channel->channel]);
+       } else {
+               /* Disable legacy interrupt */
                free_irq(efx->legacy_irq, efx);
+       }
 }
 
 /* Register dump */
index d940034acdd4aa465153f80ae0d5881cb648d6a8..0f4841d2e8dc9b556bde072a1786697ea7041460 100644 (file)
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
-phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
 
        return ret;
 }
index df8d383acf48ed0da087bb19d144499484ac0376..b9ac20f42651bd90e33e5fcb3da38db5401117a2 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 36aa109416c4c3a387a440795a8d3611cf3d4ded..c331b7ebc8124585f989d6fb3dcdbaf8a3d7d3ed 100644 (file)
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
index b0e2865a6810782e115d61287ed23578646a855e..d53e299ae1d97ca39c4f3af511b97c5dacb32827 100644 (file)
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 9c4defdec67b09299f38f1b06bf8eacbccd007d1..5f1a2250018fec5ba01a2a1a1a11f2d736f544a9 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index a972056b22498c15596a88f6b348e0f066ddb85b..3bc079a67a3dc85a222e2b784f7f65ec55dc6b59 100644 (file)
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       int needs_aneg = 0, do_suspend = 0;
+       bool needs_aneg = false, do_suspend = false, do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
        case PHY_PENDING:
                break;
        case PHY_UP:
-               needs_aneg = 1;
+               needs_aneg = true;
 
                phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->adjust_link(phydev->attached_dev);
 
                } else if (0 == phydev->link_timeout--)
-                       needs_aneg = 1;
+                       needs_aneg = true;
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
-                               needs_aneg = 1;
+                               needs_aneg = true;
                }
 
                phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
-                       do_suspend = 1;
+                       do_suspend = true;
                }
                break;
        case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
+               do_resume = true;
                break;
        }
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
        if (needs_aneg)
                err = phy_start_aneg(phydev);
-
-       if (do_suspend)
+       else if (do_suspend)
                phy_suspend(phydev);
+       else if (do_resume)
+               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
index 0ce606624296a80492b18d89a27634614aad5497..4987a1c6dc52e63220edb6fc94eea925a9fdf122 100644 (file)
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
 
        return err;
 }
index c9f3281506af568e534a47789418b466a0a77adc..2e025ddcef210bc888dd8a8ff81473a32c3dd154 100644 (file)
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
                /* mapping VLANs to MBIM sessions:
                 *   no tag     => IPS session <0>
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
                 *   512 - 4095 => unsupported, drop
                 */
-               vlan_get_tag(skb, &tci);
-
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
+       rcu_read_lock();
+       if (tci) {
                netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
                                              tci);
-       else
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
index f46cd0250e488217ca4aa517be12924e3b61c6a8..5627917c5ff761137061467e1b9d71f281ce0652 100644 (file)
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_AP ||
             vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           bss_conf->enable_beacon)
+           bss_conf->enable_beacon) {
                priv->reconfig_beacon = true;
+               priv->rearm_ani = true;
+       }
 
        if (bss_conf->assoc) {
                priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        ath9k_htc_ps_wakeup(priv);
 
+       ath9k_htc_stop_ani(priv);
        del_timer_sync(&priv->tx.cleanup_timer);
        ath9k_htc_tx_drain(priv);
 
index afb3d15e38ff0379a99c5e2c534be23c57b94e38..be1985296bdc75e725cdc161aa4101cea4a19476 100644 (file)
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
        if (!err) {
                /* only set 2G bandwidth using bw_cap command */
                band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
                err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
                                               sizeof(band_bwcap));
        } else {
index fa858d548d13c0bd794b98dc4da2053893b460dc..0489314425cbdf4a4b782867644ee9a0a0ca4b63 100644 (file)
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
 
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
 
        if (mvm->cfg->bt_shared_single_ant)
index 9426905de6b283dc0230cf51d5a694478da7797a..d73a89ecd78aa0963c40a268fc7dd8e3d7fbe29d 100644 (file)
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *     this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in usecs (suspend before executing)
+ *     bits 0-19: beacon interal in TUs (suspend before executing)
  *     bits 20-23: reserved
  *     bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max uSec to be out of assoceated channel
- * @suspend_time:      pause scan this long when returning to service channel
+ * @max_out_time:      max TUs to be out of assoceated channel
+ * @suspend_time:      pause scan this TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
  * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
index f0cebf12c7b8415a3c787d0cc77a9b2b1c2a15ef..b41dc84e9431e6c625d55a28d5cd3ca7a885cf08 100644 (file)
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
 
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
index d564233a65da6157c1aaf16a099ddf94b3be933e..f1ec0986c3c912865f0d51e1056ec03c786d8cc2 100644 (file)
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
        return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
index 9f52c5b3f0ec0e9b2da5949f2af88bbcd13d89ce..e1c838899363b373d176a71bc32d02282d56c018 100644 (file)
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                return;
        }
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
index c91dc8498852c46653cc43fddb57c382d3d7f3f0..c28de54c75d400d551a0be87d4501cc98fb5967a 100644 (file)
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
 
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
 
+not_bound:
+
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
index d619851745a19ba6d3bf605555fcdbd5a09f8341..2180902266ae6636513346df888c9d68abf0a57e 100644 (file)
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
        return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+}
index dcfd6d866d095081d7001795c4ec802c3044926f..2365553f1ef79d59c3e8598335e48eb43e5cfdcb 100644 (file)
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        }
 
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_pci_disable_msi;
        }
 
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_free_ict;
        }
 
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
 
 out_free_ict:
index 630a3fcf65bc8113fd6b67ce587f26fbf46c9774..0d4a285cbd7edb45408360a83952ab288debf62d 100644 (file)
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
                              grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
index ef05c5c49d413d5bb23a3e4adbff88cb0c9ad7cd..20e9defa10606d7d54a0a5412eb93365ce748f5c 100644 (file)
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(vif, budget);
 
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-
-               local_irq_save(flags);
-
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
 
        return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
index 76665405c5aac32d57eeadf355007cbb2b50cbec..7367208ee8cdd8b324ce661b48aa69c1d884855b 100644 (file)
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        }
 }
 
+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page were found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+                                               const int i,
+                                               const struct ubuf_info *ubuf)
+{
+       struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+       do {
+               u16 pending_idx = ubuf->desc;
+
+               if (skb_shinfo(skb)->frags[i].page.p ==
+                   foreign_vif->mmap_pages[pending_idx])
+                       break;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+       } while (ubuf);
+
+       return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-       struct xenvif *foreign_vif = NULL;
+       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
-       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                (ubuf->callback == &xenvif_zerocopy_callback)) {
-               int i = 0;
-               foreign_vif = ubuf_to_vif(ubuf);
-
-               do {
-                       u16 pending_idx = ubuf->desc;
-                       foreign_grefs[i++] =
-                               foreign_vif->pending_tx_info[pending_idx].req.gref;
-                       ubuf = (struct ubuf_info *) ubuf->ctx;
-               } while (ubuf);
-       }
-
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        }
 
        for (i = 0; i < nr_frags; i++) {
+               /* This variable also signals whether foreign_gref has a real
+                * value or not.
+                */
+               struct xenvif *foreign_vif = NULL;
+               grant_ref_t foreign_gref;
+
+               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                       (ubuf->callback == &xenvif_zerocopy_callback)) {
+                       const struct ubuf_info *const startpoint = ubuf;
+
+                       /* Ideally ubuf points to the chain element which
+                        * belongs to this frag. Or if frags were removed from
+                        * the beginning, then shortly before it.
+                        */
+                       ubuf = xenvif_find_gref(skb, i, ubuf);
+
+                       /* Try again from the beginning of the list, if we
+                        * haven't tried from there. This only makes sense in
+                        * the unlikely event of reordering the original frags.
+                        * For injected local pages it's an unnecessary second
+                        * run.
+                        */
+                       if (unlikely(!ubuf) && startpoint != head_ubuf)
+                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+                       if (likely(ubuf)) {
+                               u16 pending_idx = ubuf->desc;
+
+                               foreign_vif = ubuf_to_vif(ubuf);
+                               foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+                               /* Just a safety measure. If this was the last
+                                * element on the list, the for loop will
+                                * iterate again if a local page were added to
+                                * the end. Using head_ubuf here prevents the
+                                * second search on the chain. Or the original
+                                * frags changed order, but that's less likely.
+                                * In any way, ubuf shouldn't be NULL.
+                                */
+                               ubuf = ubuf->ctx ?
+                                       (struct ubuf_info *) ubuf->ctx :
+                                       head_ubuf;
+                       } else
+                               /* This frag was a local page, added to the
+                                * array after the skb left netback.
+                                */
+                               ubuf = head_ubuf;
+               }
                xenvif_gop_frag_copy(vif, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head,
                                     foreign_vif,
-                                    foreign_grefs[i]);
+                                    foreign_vif ? foreign_gref : UINT_MAX);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
                notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
        int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
index 6d4ee22708c93791d53860c0243d6a9258678283..32e969d9531909e575a37b80fb0debd3f4ae73d2 100644 (file)
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
        if (!found)
                return -ENODEV;
 
+       /* At early boot, bail out and defer setup to of_init() */
+       if (!of_kset)
+               return found ? 0 : -ENODEV;
+
        /* Update the sysfs attribute */
        sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
        __of_add_property_sysfs(np, newprop);
index d3d1cfd51e095f058404d96f063df76d227bd652..e384e2534594731a95eb7524fb75583fce208541 100644 (file)
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
        return PCIBIOS_SUCCESSFUL;
 }
 
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+                                  phys_addr_t base, size_t size)
+{
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+
+               mvebu_mbus_del_window(base, sz);
+               base += sz;
+               size -= sz;
+       }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e highest order bit set in the size).
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+                                  unsigned int target, unsigned int attribute,
+                                  phys_addr_t base, size_t size,
+                                  phys_addr_t remap)
+{
+       size_t size_mapped = 0;
+
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+               int ret;
+
+               ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+                                                       sz, remap);
+               if (ret) {
+                       dev_err(&port->pcie->pdev->dev,
+                               "Could not create MBus window at 0x%x, size 0x%x: %d\n",
+                               base, sz, ret);
+                       mvebu_pcie_del_windows(port, base - size_mapped,
+                                              size_mapped);
+                       return;
+               }
+
+               size -= sz;
+               size_mapped += sz;
+               base += sz;
+               if (remap != MVEBU_MBUS_NO_REMAP)
+                       remap += sz;
+       }
+}
+
 static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 {
        phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->iowin_base) {
-                       mvebu_mbus_del_window(port->iowin_base,
-                                             port->iowin_size);
+                       mvebu_pcie_del_windows(port, port->iowin_base,
+                                              port->iowin_size);
                        port->iowin_base = 0;
                        port->iowin_size = 0;
                }
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
        port->iowin_base = port->pcie->io.start + iobase;
        port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
                            (port->bridge.iolimitupper << 16)) -
-                           iobase);
+                           iobase) + 1;
 
-       mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
-                                         port->iowin_base, port->iowin_size,
-                                         iobase);
+       mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+                              port->iowin_base, port->iowin_size,
+                              iobase);
 }
 
 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->memwin_base) {
-                       mvebu_mbus_del_window(port->memwin_base,
-                                             port->memwin_size);
+                       mvebu_pcie_del_windows(port, port->memwin_base,
+                                              port->memwin_size);
                        port->memwin_base = 0;
                        port->memwin_size = 0;
                }
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
        port->memwin_base  = ((port->bridge.membase & 0xFFF0) << 16);
        port->memwin_size  =
                (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
-               port->memwin_base;
+               port->memwin_base + 1;
 
-       mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
-                                   port->memwin_base, port->memwin_size);
+       mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+                              port->memwin_base, port->memwin_size,
+                              MVEBU_MBUS_NO_REMAP);
 }
 
 /*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
 
        /*
         * On the PCI-to-PCI bridge side, the I/O windows must have at
-        * least a 64 KB size and be aligned on their size, and the
-        * memory windows must have at least a 1 MB size and be
-        * aligned on their size
+        * least a 64 KB size and the memory windows must have at
+        * least a 1 MB size. Moreover, MBus windows need to have a
+        * base address aligned on their size, and their size must be
+        * a power of two. This means that if the BAR doesn't have a
+        * power of two size, several MBus windows will actually be
+        * created. We need to ensure that the biggest MBus window
+        * (which will be the first one) is aligned on its size, which
+        * explains the rounddown_pow_of_two() being done here.
         */
        if (res->flags & IORESOURCE_IO)
-               return round_up(start, max_t(resource_size_t, SZ_64K, size));
+               return round_up(start, max_t(resource_size_t, SZ_64K,
+                                            rounddown_pow_of_two(size)));
        else if (res->flags & IORESOURCE_MEM)
-               return round_up(start, max_t(resource_size_t, SZ_1M, size));
+               return round_up(start, max_t(resource_size_t, SZ_1M,
+                                            rounddown_pow_of_two(size)));
        else
                return start;
 }
index 58499277903a4ab4a6225982d88876de399120e2..6efc2ec5e4db0823758a409eb95c2d3054a8ba48 100644 (file)
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
                return WRONG_BUS_FREQUENCY;
        }
 
-       bsp = ctrl->pci_dev->bus->cur_bus_speed;
-       msp = ctrl->pci_dev->bus->max_bus_speed;
+       bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+       msp = ctrl->pci_dev->subordinate->max_bus_speed;
 
        /* Check if there are other slots or devices on the same bus */
        if (!list_empty(&ctrl->pci_dev->subordinate->devices))
index 7325d43bf030ce65d5f386f6aeeeb3bfa4d5c482..759475ef6ff3206bd04103043cfed21092766493 100644 (file)
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
        if (!pci_is_pcie(dev))
                return 1;
 
-       return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
+       return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+                                   PCI_EXP_DEVSTA_TRPND);
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
                return 0;
 
        /* Wait for Transaction Pending bit clean */
-       if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
+       if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
                goto clear;
 
        dev_err(&dev->dev, "transaction is not cleared; "
index 9802b67040cc6a49a1e4f8df15db337ae912b778..2c61281bebd7666f6b12e87a7618c47a3a3aae18 100644 (file)
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                return GPIOF_DIR_IN;
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-                                    int value)
-{
-       return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
 static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
 {
        struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
                wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                    int value)
+{
+       wmt_gpio_set_value(chip, offset, value);
+       return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
 static struct gpio_chip wmt_gpio_chip = {
        .label = "gpio-wmt",
        .owner = THIS_MODULE,
index deb7f4bcdb7b6b6a770ce08f1d2535dccfa82100..438d4c72c7b36c27982ed8e60b9a42cbff58dc8f 100644 (file)
@@ -37,7 +37,7 @@ __visible struct {
  * kernel begins at offset 3GB...
  */
 
-asmlinkage void pnp_bios_callfunc(void);
+asmlinkage __visible void pnp_bios_callfunc(void);
 
 __asm__(".text                 \n"
        __ALIGN_STR "\n"
index 6963bdf5417593921122694d8ae425ff7a599f7e..6aea373547f65f3743faa7236b5035a83e178966 100644 (file)
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index bd628a6f981d8b550c7e651c845232fcaf5a57ff..e5f13c4310feb368b5855269a043c88bc698da74 100644 (file)
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
        if (IS_ERR(hym8563->rtc))
                return PTR_ERR(hym8563->rtc);
 
+       /* the hym8563 alarm only supports a minute accuracy */
+       hym8563->rtc->uie_unsupported = 1;
+
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
index 1b681427dde045cdad8f2455c07467fba4f077f1..c341f855fadcd433a6730db0d941317615a7fe9e 100644 (file)
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
        list_del(&rphy->list);
        mutex_unlock(&sas_host->lock);
 
-       sas_bsg_remove(shost, rphy);
-
        transport_destroy_device(dev);
 
        put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
        }
 
        sas_rphy_unlink(rphy);
+       sas_bsg_remove(NULL, rphy);
        transport_remove_device(dev);
        device_del(dev);
 }
index fc67f564f02cf77eec2350f4435836d409020314..788ed9b59b4e3f04c3a485fefe6d31dfdb6b681f 100644 (file)
@@ -1,10 +1,12 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-y  := intc/
+obj-$(CONFIG_SUPERH)                   += intc/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += intc/
+ifneq ($(CONFIG_COMMON_CLK),y)
+obj-$(CONFIG_HAVE_CLK)                 += clk/
+endif
+obj-$(CONFIG_MAPLE)                    += maple/
+obj-$(CONFIG_SUPERHYWAY)               += superhyway/
 
-obj-$(CONFIG_HAVE_CLK)         += clk/
-obj-$(CONFIG_MAPLE)            += maple/
-obj-$(CONFIG_SUPERHYWAY)       += superhyway/
-
-obj-y                          += pm_runtime.o
+obj-y                                  += pm_runtime.o
index 8afa5a4589f2dd03771acaac8be585447e8033c0..10c65eb51f8587ca79b15291a75c04c16e4604a2 100644 (file)
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
        .con_ids = { NULL, },
 };
 
+static bool default_pm_on;
+
 static int __init sh_pm_runtime_init(void)
 {
+       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+               if (!of_machine_is_compatible("renesas,emev2") &&
+                   !of_machine_is_compatible("renesas,r7s72100") &&
+                   !of_machine_is_compatible("renesas,r8a73a4") &&
+                   !of_machine_is_compatible("renesas,r8a7740") &&
+                   !of_machine_is_compatible("renesas,r8a7778") &&
+                   !of_machine_is_compatible("renesas,r8a7779") &&
+                   !of_machine_is_compatible("renesas,r8a7790") &&
+                   !of_machine_is_compatible("renesas,r8a7791") &&
+                   !of_machine_is_compatible("renesas,sh7372") &&
+                   !of_machine_is_compatible("renesas,sh73a0"))
+                       return 0;
+       }
+
+       default_pm_on = true;
        pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
        return 0;
 }
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
 
 static int __init sh_pm_runtime_late_init(void)
 {
-       pm_genpd_poweroff_unused();
+       if (default_pm_on)
+               pm_genpd_poweroff_unused();
        return 0;
 }
 late_initcall(sh_pm_runtime_late_init);
index 713af4806f265e10b87dcfade6b6989055c2f283..f6759dc0153b4a8c45fb83f7660e34be2a47641b 100644 (file)
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
        struct sg_table *sgt;
        void *buf, *pbuf;
 
-       /*
-        * Some DMA controllers have problems transferring buffers that are
-        * not multiple of 4 bytes. So we truncate the transfer so that it
-        * is suitable for such controllers, and handle the trailing bytes
-        * manually after the DMA completes.
-        *
-        * REVISIT: It would be better if this information could be
-        * retrieved directly from the DMA device in a similar way than
-        * ->copy_align etc. is done.
-        */
-       len = ALIGN(drv_data->len, 4);
-
        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-                       /* Handle the last bytes of unaligned transfer */
                        drv_data->tx += drv_data->tx_map_len;
-                       drv_data->write(drv_data);
-
                        drv_data->rx += drv_data->rx_map_len;
-                       drv_data->read(drv_data);
 
                        msg->actual_length += drv_data->len;
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
index b032e8885e2435b3585810f1266bca9aa3fce6a8..78c66e3c53ed5f88d8d79ac3c16cdb4800af71f0 100644 (file)
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
        int ret;
 
        ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
index 4eb9bf02996cf179cf3e6365421867aaf8a10593..939edf473235dca2fb7692fe0fec6dbdd649bbd0 100644 (file)
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
                spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
        }
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
-       void *tmp;
-       unsigned int max_tx, max_rx;
        int ret;
 
-       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-               max_tx = 0;
-               max_rx = 0;
-
-               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-                       if ((master->flags & SPI_MASTER_MUST_TX) &&
-                           !xfer->tx_buf)
-                               max_tx = max(xfer->len, max_tx);
-                       if ((master->flags & SPI_MASTER_MUST_RX) &&
-                           !xfer->rx_buf)
-                               max_rx = max(xfer->len, max_rx);
-               }
-
-               if (max_tx) {
-                       tmp = krealloc(master->dummy_tx, max_tx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_tx = tmp;
-                       memset(tmp, 0, max_tx);
-               }
-
-               if (max_rx) {
-                       tmp = krealloc(master->dummy_rx, max_rx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_rx = tmp;
-               }
-
-               if (max_tx || max_rx) {
-                       list_for_each_entry(xfer, &msg->transfers,
-                                           transfer_list) {
-                               if (!xfer->tx_buf)
-                                       xfer->tx_buf = master->dummy_tx;
-                               if (!xfer->rx_buf)
-                                       xfer->rx_buf = master->dummy_rx;
-                       }
-               }
-       }
-
        if (!master->can_dma)
                return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 
        return 0;
 }
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+       struct spi_transfer *xfer;
+       void *tmp;
+       unsigned int max_tx, max_rx;
+
+       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+               max_tx = 0;
+               max_rx = 0;
+
+               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                       if ((master->flags & SPI_MASTER_MUST_TX) &&
+                           !xfer->tx_buf)
+                               max_tx = max(xfer->len, max_tx);
+                       if ((master->flags & SPI_MASTER_MUST_RX) &&
+                           !xfer->rx_buf)
+                               max_rx = max(xfer->len, max_rx);
+               }
+
+               if (max_tx) {
+                       tmp = krealloc(master->dummy_tx, max_tx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_tx = tmp;
+                       memset(tmp, 0, max_tx);
+               }
+
+               if (max_rx) {
+                       tmp = krealloc(master->dummy_rx, max_rx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_rx = tmp;
+               }
+
+               if (max_tx || max_rx) {
+                       list_for_each_entry(xfer, &msg->transfers,
+                                           transfer_list) {
+                               if (!xfer->tx_buf)
+                                       xfer->tx_buf = master->dummy_tx;
+                               if (!xfer->rx_buf)
+                                       xfer->rx_buf = master->dummy_rx;
+                       }
+               }
+       }
+
+       return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
        int ret;
 
-       master->queued = true;
        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
+       master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
        return 0;
 
 err_start_queue:
-err_init_queue:
        spi_destroy_queue(master);
+err_init_queue:
        return ret;
 }
 
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-       unsigned        bad_bits;
+       unsigned        bad_bits, ugly_bits;
        int             status = 0;
 
        /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
         * that aren't supported with their current master
         */
        bad_bits = spi->mode & ~spi->master->mode_bits;
+       ugly_bits = bad_bits &
+                   (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+       if (ugly_bits) {
+               dev_warn(&spi->dev,
+                        "setup: ignoring unsupported mode bits %x\n",
+                        ugly_bits);
+               spi->mode &= ~ugly_bits;
+               bad_bits &= ~ugly_bits;
+       }
        if (bad_bits) {
                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
index 4144a75e5f71bc6258d9d8dcdc1a02b3b2a2f035..c270c9ae6d27711d531ebb54a07b1cdc125be9da 100644 (file)
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
                of_node_put(port);
                if (port == imx_crtc->port) {
                        ret = of_graph_parse_endpoint(ep, &endpoint);
-                       return ret ? ret : endpoint.id;
+                       return ret ? ret : endpoint.port;
                }
        } while (ep);
 
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
                        if (!remote || !of_device_is_available(remote)) {
                                of_node_put(remote);
                                continue;
+                       } else if (!of_device_is_available(remote->parent)) {
+                               dev_warn(&pdev->dev, "parent device of %s is not available\n",
+                                        remote->full_name);
+                               of_node_put(remote);
+                               continue;
                        }
 
                        ret = imx_drm_add_component(&pdev->dev, remote);
index 575533f4fd64fc7d53d38c4a0cb307fa429f9f68..a23f4f773146a8891925165622cb5167d30252df 100644 (file)
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
        tve->dev = dev;
        spin_lock_init(&tve->lock);
 
-       ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
+       ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
        if (ddc_node) {
                tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
                of_node_put(ddc_node);
index 8c101cbbee97646d2b06166865381bf9144df2cb..acc8184c46cde0d85ebeccd41c000b67ae9da429 100644 (file)
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
        struct vpfe_fh *fh = vb2_get_drv_priv(vq);
        struct vpfe_video_device *video = fh->video;
 
-       if (!vb2_is_streaming(vq))
-               return 0;
        /* release all active buffers */
+       if (video->cur_frm == video->next_frm) {
+               vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (video->cur_frm != NULL)
+                       vb2_buffer_done(&video->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (video->next_frm != NULL)
+                       vb2_buffer_done(&video->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&video->dma_queue)) {
                video->next_frm = list_entry(video->dma_queue.next,
                                                struct vpfe_cap_buffer, list);
index b3d2cc729657df34e57bc300033bafc1baf2bdd6..4ba569258498b9d6248de4d58d13c63750c68561 100644 (file)
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
        { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*     { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
        { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
-#endif
        { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
        { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
index 57eca7a45672b94674677f88cfef688ca403c386..4fe751f7c2bf2438f0e49054731d911c4e84d239 100644 (file)
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
 #endif /* CONFIG_8723AU_P2P */
 
        rtw_scan_abort23a(padapter);
-        /* set this at the end */
-       padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
 
        RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
        DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
index c49160e477d8d0245392fea0db2a2d432c186b2f..07e542e5d1562b18fa3caad2903b0928dccdd1e2 100644 (file)
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
        if (addr == RECV_BULK_IN_ADDR) {
                pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
        } else if (addr == RECV_INT_IN_ADDR) {
-               pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+               pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
        } else if (addr < HW_QUEUE_ENTRY) {
                ep_num = pdvobj->Queue2Pipe[addr];
                pipe = usb_sndbulkpipe(pusbd, ep_num);
index 78cab13bbb1be3796b0e00af4a0667329ed4a2d8..46588c85d39bd0ce206f213aebba9330db7e33b8 100644 (file)
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         * Initiator is expecting a NopIN ping reply..
         */
        if (hdr->itt != RESERVED_ITT) {
-               BUG_ON(!cmd);
+               if (!cmd)
+                       return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+                                               (unsigned char *)hdr);
 
                spin_lock_bh(&conn->cmd_lock);
                list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
index 6960f22909ae2eeddd651d59217d1f09519e9154..302eb3b7871558bb2a1241fedc521e20efebf415 100644 (file)
@@ -775,6 +775,7 @@ struct iscsi_np {
        int                     np_ip_proto;
        int                     np_sock_type;
        enum np_thread_state_table np_thread_state;
+       bool                    enabled;
        enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
index 8739b98f6f93539b8c6eb95f27d7fde3601b40d7..ca31fa1b8a4b69058290243bc61b0007d2c7d3cc 100644 (file)
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
                }
                off = mrdsl % PAGE_SIZE;
                if (!off)
-                       return 0;
+                       goto check_prot;
 
                if (mrdsl < PAGE_SIZE)
                        mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                        return -1;
                }
+               /*
+                * ISER currently requires that ImmediateData + Unsolicited
+                * Data be disabled when protection / signature MRs are enabled.
+                */
+check_prot:
+               if (sess->se_sess->sup_prot_ops &
+                  (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+                   TARGET_PROT_DOUT_INSERT)) {
+
+                       sprintf(buf, "ImmediateData=No");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+
+                       sprintf(buf, "InitialR2T=Yes");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+                       pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+                                " T10-PI enabled ISER session\n");
+               }
        }
 
        return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
        }
 
        np->np_transport = t;
+       np->enabled = true;
        return 0;
 }
 
index eb96b20dc09e13ffe32e226df38b73241f176a63..ca1811858afd01fa4b09ba6e1523f32bd0c3e1b2 100644 (file)
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
                return;
        }
 
+       tpg_np->tpg_np->enabled = false;
        iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
index 65001e1336702966108081443d5a44f39988d5af..26416c15d65c25c1b915f6bdf00b03db341b9152 100644 (file)
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
-       if (dev->transport->get_write_cache) {
-               pr_warn("emulate_write_cache cannot be changed when underlying"
-                       " HW reports WriteCacheEnabled, ignoring request\n");
-               return 0;
+       if (flag &&
+           dev->transport->get_write_cache) {
+               pr_err("emulate_write_cache not supported for this device\n");
+               return -EINVAL;
        }
 
        dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
                return 0;
        }
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
+               /* 0 is only allowed value for non-supporting backends */
+               if (flag == 0)
+                       return 0;
+
                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
                return -ENOSYS;
index d4b98690a73680244676b6e608ede6c85ff724cb..789aa9eb0a1e590b8853a49e8f0ae137f04d3241 100644 (file)
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
        init_completion(&cmd->cmd_wait_comp);
        init_completion(&cmd->task_stop_comp);
        spin_lock_init(&cmd->t_state_lock);
+       kref_init(&cmd->cmd_kref);
        cmd->transport_state = CMD_T_DEV_ACTIVE;
 
        cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
        unsigned long flags;
        int ret = 0;
 
-       kref_init(&se_cmd->cmd_kref);
        /*
         * Add a second kref if the fabric caller is expecting to handle
         * fabric acknowledgement that requires two target_put_sess_cmd()
index 01cf37f212c30724ed6a0addbe8c7cbe69dfd6a3..f5fd515b2bee266dd9c8279ea804bc7372d955f3 100644 (file)
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
        struct fc_lport *lport;
-       struct se_session *se_sess;
+       struct ft_sess *sess;
 
        if (!cmd)
                return;
-       se_sess = cmd->sess->se_sess;
+       sess = cmd->sess;
        fp = cmd->req_frame;
        lport = fr_dev(fp);
        if (fr_seq(fp))
                lport->tt.seq_release(fr_seq(fp));
        fc_frame_free(fp);
-       percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-       ft_sess_put(cmd->sess); /* undo get from lookup at recv */
+       percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+       ft_sess_put(sess);      /* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
index 96109a9972b6113cdb88d1861bf00353a00a0a93..84b4bfb843443ef934d2d80abc12131bb8616a5a 100644 (file)
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
 static unsigned event_array_pages __read_mostly;
 
+/*
+ * sync_set_bit() and friends must be unsigned long aligned on non-x86
+ * platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+    (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
 #define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
 
 static inline event_word_t *event_word_from_port(unsigned port)
 {
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 static void evtchn_fifo_clear_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static void evtchn_fifo_set_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_is_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+       return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_test_and_set_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
 static void evtchn_fifo_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+       event_word_t *word = event_word_from_port(port);
+       return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
 /*
  * Clear MASKED, spinning if BUSY is set.
  */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
        BUG_ON(!irqs_disabled());
 
        clear_masked(word);
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+       if (evtchn_fifo_is_pending(port)) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        }
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                              struct evtchn_fifo_control_block *control_block,
-                             unsigned priority, uint32_t *ready)
+                             unsigned priority, unsigned long *ready)
 {
        struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
        uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
         * copy of the ready word.
         */
        if (head == 0)
-               clear_bit(priority, BM(ready));
+               clear_bit(priority, ready);
 
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
-           && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+       if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
                handle_irq_for_port(port);
 
        q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
 static void evtchn_fifo_handle_events(unsigned cpu)
 {
        struct evtchn_fifo_control_block *control_block;
-       uint32_t ready;
+       unsigned long ready;
        unsigned q;
 
        control_block = per_cpu(cpu_control_block, cpu);
index 1c8c6cc6de3097ceab15a5a16307e0b569ee6a29..4b0eff6da6740552043679764f0ebc76a47917a0 100644 (file)
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
        _enter("");
 
+       /* Break the callbacks here so that we do it after the final ACK is
+        * received.  The step number here must match the final number in
+        * afs_deliver_cb_callback().
+        */
+       if (call->unmarshall == 6) {
+               ASSERT(call->server && call->count && call->request);
+               afs_break_callbacks(call->server, call->count, call->request);
+       }
+
        afs_put_server(call->server);
        call->server = NULL;
        kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("trailer");
                if (skb->len != 0)
                        return -EBADMSG;
+
+               /* Record that the message was unmarshalled successfully so
+                * that the call destructor can know do the callback breaking
+                * work, even if the final ACK isn't received.
+                *
+                * If the step number changes, then afs_cm_destructor() must be
+                * updated also.
+                */
+               call->unmarshall++;
+       case 6:
                break;
        }
 
index d2f91bd615a9304f905d8712fda5d284268ba5be..71d5982312f3d11dd6e3dd23079e5c6bef7c23a6 100644 (file)
@@ -75,7 +75,7 @@ struct afs_call {
        const struct afs_call_type *type;       /* type of call */
        const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
-       work_func_t             async_workfn;
+       void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
        struct work_struct      async_work;     /* asynchronous work processor */
        struct work_struct      work;           /* actual work processor */
        struct sk_buff_head     rx_queue;       /* received packets */
index ef943df73b8cdee2c6964439b81417a9c1110b12..03a3beb170048df40c436dff51c41373104cf9e7 100644 (file)
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
 
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+       call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -183,6 +190,28 @@ static void afs_free_call(struct afs_call *call)
        kfree(call);
 }
 
+/*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+       if (call->rxcall) {
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+       }
+       if (call->type->destructor)
+               call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+       afs_end_call_nofree(call);
+       afs_free_call(call);
+}
+
 /*
  * allocate a call with flat request and reply buffers
  */
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
               atomic_read(&afs_outstanding_calls));
 
        call->wait_mode = wait_mode;
-       INIT_WORK(&call->async_work, afs_process_async_call);
+       call->async_workfn = afs_process_async_call;
+       INIT_WORK(&call->async_work, afs_async_workfn);
 
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
        while ((skb = skb_dequeue(&call->rx_queue)))
                afs_free_skb(skb);
-       rxrpc_kernel_end_call(rxcall);
-       call->rxcall = NULL;
 error_kill_call:
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
        if (call->state >= AFS_CALL_COMPLETE) {
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
-               if (call->incoming) {
-                       rxrpc_kernel_end_call(call->rxcall);
-                       call->rxcall = NULL;
-                       call->type->destructor(call);
-                       afs_free_call(call);
-               }
+               if (call->incoming)
+                       afs_end_call(call);
        }
 
        _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        }
 
        _debug("call complete");
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
                call->reply = NULL;
 
                /* kill the call */
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               if (call->type->destructor)
-                       call->type->destructor(call);
+               afs_end_call_nofree(call);
 
                /* we can't just delete the call because the work item may be
                 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
        call->reply_size += len;
 }
 
-static void afs_async_workfn(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        default:
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               call->type->destructor(call);
-               afs_free_call(call);
+               afs_end_call(call);
                _leave(" [error]");
                return;
        }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
        if (n >= 0) {
+               /* Success */
                _leave(" [replied]");
                return;
        }
+
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        }
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" [error]");
 }
 
index 2ad7de94efef71e58af4e747cd05d3cb3aeeba40..2f6d7b13b5bdacaba4df505b57b1c3cd9417ee13 100644 (file)
@@ -3120,6 +3120,8 @@ process_slot:
                        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                                u64 skip = 0;
                                u64 trim = 0;
+                               u64 aligned_end = 0;
+
                                if (off > key.offset) {
                                        skip = off - key.offset;
                                        new_key.offset += skip;
@@ -3136,9 +3138,11 @@ process_slot:
                                size -= skip + trim;
                                datal -= skip + trim;
 
+                               aligned_end = ALIGN(new_key.offset + datal,
+                                                   root->sectorsize);
                                ret = btrfs_drop_extents(trans, root, inode,
                                                         new_key.offset,
-                                                        new_key.offset + datal,
+                                                        aligned_end,
                                                         1);
                                if (ret) {
                                        if (ret != -EOPNOTSUPP)
index eb6537a08c1bf4438f0bc90df319977cb964bee7..fd38b5053479cf62f3297d43038288027646ebe0 100644 (file)
@@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
                goto out;
        }
 
-       if (key.type == BTRFS_INODE_REF_KEY) {
+       if (found_key.type == BTRFS_INODE_REF_KEY) {
                struct btrfs_inode_ref *iref;
                iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_ref);
index aadc2b68678b7d70c0381d10c847c86624589c4c..a22d667f1069e5eb8485d121f241725de732b8a9 100644 (file)
@@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode)
        if (cifs_i->time == 0)
                return true;
 
+       if (!cifs_sb->actimeo)
+               return true;
+
        if (!time_in_range(jiffies, cifs_i->time,
                                cifs_i->time + cifs_sb->actimeo))
                return true;
index 42ae01eefc0767902b4827ef4113199fd6e57a1a..be2bea834bf459563e2d03e1e0fa8dec97b49c70 100644 (file)
@@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
-/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * If ref is non-zero, then decrement the refcount too.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *
-dentry_kill(struct dentry *dentry, int unlock_on_failure)
-       __releases(dentry->d_lock)
+static void __dentry_kill(struct dentry *dentry)
 {
-       struct inode *inode;
        struct dentry *parent = NULL;
        bool can_free = true;
-
-       if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-               can_free = dentry->d_flags & DCACHE_MAY_FREE;
-               spin_unlock(&dentry->d_lock);
-               goto out;
-       }
-
-       inode = dentry->d_inode;
-       if (inode && !spin_trylock(&inode->i_lock)) {
-relock:
-               if (unlock_on_failure) {
-                       spin_unlock(&dentry->d_lock);
-                       cpu_relax();
-               }
-               return dentry; /* try again with same dentry */
-       }
        if (!IS_ROOT(dentry))
                parent = dentry->d_parent;
-       if (parent && !spin_trylock(&parent->d_lock)) {
-               if (inode)
-                       spin_unlock(&inode->i_lock);
-               goto relock;
-       }
 
        /*
         * The dentry is now unrecoverably dead to the world.
@@ -520,9 +490,72 @@ relock:
                can_free = false;
        }
        spin_unlock(&dentry->d_lock);
-out:
        if (likely(can_free))
                dentry_free(dentry);
+}
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static struct dentry *dentry_kill(struct dentry *dentry)
+       __releases(dentry->d_lock)
+{
+       struct inode *inode = dentry->d_inode;
+       struct dentry *parent = NULL;
+
+       if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+               goto failed;
+
+       if (!IS_ROOT(dentry)) {
+               parent = dentry->d_parent;
+               if (unlikely(!spin_trylock(&parent->d_lock))) {
+                       if (inode)
+                               spin_unlock(&inode->i_lock);
+                       goto failed;
+               }
+       }
+
+       __dentry_kill(dentry);
+       return parent;
+
+failed:
+       spin_unlock(&dentry->d_lock);
+       cpu_relax();
+       return dentry; /* try again with same dentry */
+}
+
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+       struct dentry *parent = dentry->d_parent;
+       if (IS_ROOT(dentry))
+               return NULL;
+       if (likely(spin_trylock(&parent->d_lock)))
+               return parent;
+       spin_unlock(&dentry->d_lock);
+       rcu_read_lock();
+again:
+       parent = ACCESS_ONCE(dentry->d_parent);
+       spin_lock(&parent->d_lock);
+       /*
+        * We can't blindly lock dentry until we are sure
+        * that we won't violate the locking order.
+        * Any changes of dentry->d_parent must have
+        * been done with parent->d_lock held, so
+        * spin_lock() above is enough of a barrier
+        * for checking if it's still our child.
+        */
+       if (unlikely(parent != dentry->d_parent)) {
+               spin_unlock(&parent->d_lock);
+               goto again;
+       }
+       rcu_read_unlock();
+       if (parent != dentry)
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+       else
+               parent = NULL;
        return parent;
 }
 
@@ -579,7 +612,7 @@ repeat:
        return;
 
 kill_it:
-       dentry = dentry_kill(dentry, 1);
+       dentry = dentry_kill(dentry);
        if (dentry)
                goto repeat;
 }
@@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list)
        struct dentry *dentry, *parent;
 
        while (!list_empty(list)) {
+               struct inode *inode;
                dentry = list_entry(list->prev, struct dentry, d_lru);
                spin_lock(&dentry->d_lock);
+               parent = lock_parent(dentry);
+
                /*
                 * The dispose list is isolated and dentries are not accounted
                 * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list)
                 */
                if ((int)dentry->d_lockref.count > 0) {
                        spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
                        continue;
                }
 
-               parent = dentry_kill(dentry, 0);
-               /*
-                * If dentry_kill returns NULL, we have nothing more to do.
-                */
-               if (!parent)
+
+               if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+                       bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+                       spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
+                       if (can_free)
+                               dentry_free(dentry);
                        continue;
+               }
 
-               if (unlikely(parent == dentry)) {
-                       /*
-                        * trylocks have failed and d_lock has been held the
-                        * whole time, so it could not have been added to any
-                        * other lists. Just add it back to the shrink list.
-                        */
+               inode = dentry->d_inode;
+               if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
                        d_shrink_add(dentry, list);
                        spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
                        continue;
                }
+
+               __dentry_kill(dentry);
+
                /*
                 * We need to prune ancestors too. This is necessary to prevent
                 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list)
                 * fragmentation.
                 */
                dentry = parent;
-               while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-                       dentry = dentry_kill(dentry, 1);
+               while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+                       parent = lock_parent(dentry);
+                       if (dentry->d_lockref.count != 1) {
+                               dentry->d_lockref.count--;
+                               spin_unlock(&dentry->d_lock);
+                               if (parent)
+                                       spin_unlock(&parent->d_lock);
+                               break;
+                       }
+                       inode = dentry->d_inode;        /* can't be NULL */
+                       if (unlikely(!spin_trylock(&inode->i_lock))) {
+                               spin_unlock(&dentry->d_lock);
+                               if (parent)
+                                       spin_unlock(&parent->d_lock);
+                               cpu_relax();
+                               continue;
+                       }
+                       __dentry_kill(dentry);
+                       dentry = parent;
+               }
        }
 }
 
index 476f3ebf437ef40ddd7432200080825b7e9e992c..238b7aa26f68ab538df0cc219073a3d26541cc11 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
        unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
-       /* Limit stack size to 1GB */
+       /* Limit stack size */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
index e01ea4a14a014b3123dddf6d2ad1ecb9a6381736..5e9a80cfc3d8857c7cfe496eae627c921c47c6f2 100644 (file)
@@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
 static int kernfs_fop_open(struct inode *inode, struct file *file)
 {
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
+       struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
@@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;
 
-       /* check perms and supported operations */
-       if ((file->f_mode & FMODE_WRITE) &&
-           (!(inode->i_mode & S_IWUGO) || !has_write))
-               goto err_out;
+       /* see the flag definition for details */
+       if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
+               if ((file->f_mode & FMODE_WRITE) &&
+                   (!(inode->i_mode & S_IWUGO) || !has_write))
+                       goto err_out;
 
-       if ((file->f_mode & FMODE_READ) &&
-           (!(inode->i_mode & S_IRUGO) || !has_read))
-               goto err_out;
+               if ((file->f_mode & FMODE_READ) &&
+                   (!(inode->i_mode & S_IRUGO) || !has_read))
+                       goto err_out;
+       }
 
        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
index e663aeac579e5d8aaa2596a177114b17f262bd6c..e390bd9ae068696d4a5425057559d3037e6f4518 100644 (file)
@@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;
 
-       /* Ensure that fl->fl_filp has compatible f_mode */
-       switch (l->l_type) {
-       case F_RDLCK:
-               if (!(filp->f_mode & FMODE_READ))
-                       return -EBADF;
-               break;
-       case F_WRLCK:
-               if (!(filp->f_mode & FMODE_WRITE))
-                       return -EBADF;
-               break;
-       }
-
        return assign_type(fl, l->l_type);
 }
 
@@ -2034,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
        return error;
 }
 
+/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
+static int
+check_fmode_for_setlk(struct file_lock *fl)
+{
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               if (!(fl->fl_file->f_mode & FMODE_READ))
+                       return -EBADF;
+               break;
+       case F_WRLCK:
+               if (!(fl->fl_file->f_mode & FMODE_WRITE))
+                       return -EBADF;
+       }
+       return 0;
+}
+
 /* Apply the lock described by l to an open file descriptor.
  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
  */
@@ -2071,6 +2075,10 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
@@ -2206,6 +2214,10 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
index 6f3f392d48af76d9b7bdb752f1d13eff9580be1b..f66c66b9f18285a4084114679d0e1d3e555a253c 100644 (file)
@@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
         * by uid/gid. */
        int i, j;
 
-       if (pacl->a_count <= 4)
-               return; /* no users or groups */
+       /* no users or groups */
+       if (!pacl || pacl->a_count <= 4)
+               return;
+
        i = 1;
        while (pacl->a_entries[i].e_tag == ACL_USER)
                i++;
@@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
 
        /*
         * ACLs with no ACEs are treated differently in the inheritable
-        * and effective cases: when there are no inheritable ACEs, we
-        * set a zero-length default posix acl:
+        * and effective cases: when there are no inheritable ACEs,
+        * calls ->set_acl with a NULL ACL structure.
         */
-       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
-               pacl = posix_acl_alloc(0, GFP_KERNEL);
-               return pacl ? pacl : ERR_PTR(-ENOMEM);
-       }
+       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+               return NULL;
+
        /*
         * When there are no effective ACEs, the following will end
         * up setting a 3-element effective posix ACL with all
@@ -589,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
                add_to_mask(state, &state->groups->aces[i].perms);
        }
 
-       if (!state->users->n && !state->groups->n) {
+       if (state->users->n || state->groups->n) {
                pace++;
                pace->e_tag = ACL_MASK;
                low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
index 3ba65979a3cde006e73ed2c310e2bc42f6988fb2..9a77a5a21557c4e16740196c16d7f4f218ce2643 100644 (file)
@@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
                return NULL;
        }
        clp->cl_name.len = name.len;
+       INIT_LIST_HEAD(&clp->cl_sessions);
+       idr_init(&clp->cl_stateids);
+       atomic_set(&clp->cl_refcount, 0);
+       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+       INIT_LIST_HEAD(&clp->cl_idhash);
+       INIT_LIST_HEAD(&clp->cl_openowners);
+       INIT_LIST_HEAD(&clp->cl_delegations);
+       INIT_LIST_HEAD(&clp->cl_lru);
+       INIT_LIST_HEAD(&clp->cl_callbacks);
+       INIT_LIST_HEAD(&clp->cl_revoked);
+       spin_lock_init(&clp->cl_lock);
+       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
 }
 
@@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
+       rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
@@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
        if (clp == NULL)
                return NULL;
 
-       INIT_LIST_HEAD(&clp->cl_sessions);
        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                spin_lock(&nn->client_lock);
@@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
                spin_unlock(&nn->client_lock);
                return NULL;
        }
-       idr_init(&clp->cl_stateids);
-       atomic_set(&clp->cl_refcount, 0);
-       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
-       INIT_LIST_HEAD(&clp->cl_idhash);
-       INIT_LIST_HEAD(&clp->cl_openowners);
-       INIT_LIST_HEAD(&clp->cl_delegations);
-       INIT_LIST_HEAD(&clp->cl_lru);
-       INIT_LIST_HEAD(&clp->cl_callbacks);
-       INIT_LIST_HEAD(&clp->cl_revoked);
-       spin_lock_init(&clp->cl_lock);
        nfsd4_init_callback(&clp->cl_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
-       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        gen_confirm(clp);
@@ -3716,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-       if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+       if (check_for_locks(stp->st_file, lo))
                return nfserr_locks_held;
-       release_lock_stateid(stp);
+       /*
+        * Currently there's a 1-1 lock stateid<->lockowner
+        * correspondance, and we have to delete the lockowner when we
+        * delete the lock stateid:
+        */
+       unhash_lockowner(lo);
        return nfs_ok;
 }
 
@@ -4158,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
 
        if (!same_owner_str(&lo->lo_owner, owner, clid))
                return false;
+       if (list_empty(&lo->lo_owner.so_stateids)) {
+               WARN_ON_ONCE(1);
+               return false;
+       }
        lst = list_first_entry(&lo->lo_owner.so_stateids,
                               struct nfs4_ol_stateid, st_perstateowner);
        return lst->st_file->fi_inode == inode;
index af3f7aa73e13a007d06fd528d8c8d101d2553087..ee1f88419cb0640d38203eb3a16c7b03094f7c56 100644 (file)
@@ -472,11 +472,15 @@ bail:
 
 void dlm_destroy_master_caches(void)
 {
-       if (dlm_lockname_cache)
+       if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
+               dlm_lockname_cache = NULL;
+       }
 
-       if (dlm_lockres_cache)
+       if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
+               dlm_lockres_cache = NULL;
+       }
 }
 
 static void dlm_lockres_release(struct kref *kref)
index 8e7eef755a9b259b4debacb54d8377e8c2a88c0f..f5cb9ba84510fe5632a62af0bbf3843a45eeba23 100644 (file)
@@ -1548,7 +1548,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
-       ssize_t count = 0;
+       ssize_t count;
 
        pipe = get_pipe_info(file);
        if (!pipe)
@@ -1557,8 +1557,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = rw_copy_check_uvector(READ, uiov, nr_segs,
                                    ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret <= 0)
-               return ret;
+               goto out;
 
+       count = ret;
        iov_iter_init(&iter, READ, iov, nr_segs, count);
 
        sd.len = 0;
@@ -1571,6 +1572,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
        pipe_unlock(pipe);
 
+out:
        if (iov != iovstack)
                kfree(iov);
 
index 28cc1acd5439bf8caeb6d93e8bf68804ad738b1b..e9ef59b3abb1e5552cdc2a8880df37b85093d249 100644 (file)
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
        ssize_t count;
        char *buf;
 
-       /* acquire buffer and ensure that it's >= PAGE_SIZE */
+       /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
        count = seq_get_buf(sf, &buf);
        if (count < PAGE_SIZE) {
                seq_commit(sf, -1);
                return 0;
        }
+       memset(buf, 0, PAGE_SIZE);
 
        /*
         * Invoke show().  Control may reach here via seq file lseek even
index a66ad6196f59cca2f61a31c4a966d41b92497ad8..8794423f7efbe2c90408ba9dfa6cbb7e55e6c3d9 100644 (file)
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
 {
        int err;
 
-       sysfs_root = kernfs_create_root(NULL, 0, NULL);
+       sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
+                                       NULL);
        if (IS_ERR(sysfs_root))
                return PTR_ERR(sysfs_root);
 
index 01b6a0102fbdd4d153612740d95c4f00fda691f7..abda1124a70f66fbd1a79b28f8eaf5bf1916da1e 100644 (file)
@@ -213,7 +213,7 @@ xfs_attr_calc_size(
                 * Out of line attribute, cannot double split, but
                 * make room for the attribute value itself.
                 */
-               uint    dblocks = XFS_B_TO_FSB(mp, valuelen);
+               uint    dblocks = xfs_attr3_rmt_blocks(mp, valuelen);
                nblks += dblocks;
                nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
        }
@@ -698,11 +698,22 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
 
                trace_xfs_attr_leaf_replace(args);
 
+               /* save the attribute state for later removal*/
                args->op_flags |= XFS_DA_OP_RENAME;     /* an atomic rename */
                args->blkno2 = args->blkno;             /* set 2nd entry info*/
                args->index2 = args->index;
                args->rmtblkno2 = args->rmtblkno;
                args->rmtblkcnt2 = args->rmtblkcnt;
+               args->rmtvaluelen2 = args->rmtvaluelen;
+
+               /*
+                * clear the remote attr state now that it is saved so that the
+                * values reflect the state of the attribute we are about to
+                * add, not the attribute we just found and will remove later.
+                */
+               args->rmtblkno = 0;
+               args->rmtblkcnt = 0;
+               args->rmtvaluelen = 0;
        }
 
        /*
@@ -794,6 +805,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
                args->blkno = args->blkno2;
                args->rmtblkno = args->rmtblkno2;
                args->rmtblkcnt = args->rmtblkcnt2;
+               args->rmtvaluelen = args->rmtvaluelen2;
                if (args->rmtblkno) {
                        error = xfs_attr_rmtval_remove(args);
                        if (error)
@@ -999,13 +1011,22 @@ restart:
 
                trace_xfs_attr_node_replace(args);
 
+               /* save the attribute state for later removal*/
                args->op_flags |= XFS_DA_OP_RENAME;     /* atomic rename op */
                args->blkno2 = args->blkno;             /* set 2nd entry info*/
                args->index2 = args->index;
                args->rmtblkno2 = args->rmtblkno;
                args->rmtblkcnt2 = args->rmtblkcnt;
+               args->rmtvaluelen2 = args->rmtvaluelen;
+
+               /*
+                * clear the remote attr state now that it is saved so that the
+                * values reflect the state of the attribute we are about to
+                * add, not the attribute we just found and will remove later.
+                */
                args->rmtblkno = 0;
                args->rmtblkcnt = 0;
+               args->rmtvaluelen = 0;
        }
 
        retval = xfs_attr3_leaf_add(blk->bp, state->args);
@@ -1133,6 +1154,7 @@ restart:
                args->blkno = args->blkno2;
                args->rmtblkno = args->rmtblkno2;
                args->rmtblkcnt = args->rmtblkcnt2;
+               args->rmtvaluelen = args->rmtvaluelen2;
                if (args->rmtblkno) {
                        error = xfs_attr_rmtval_remove(args);
                        if (error)
index fe9587fab17a6822d9bec1b5862a6a133adf9dc7..511c283459b19441d782114c4f832207860deab1 100644 (file)
@@ -1229,6 +1229,7 @@ xfs_attr3_leaf_add_work(
                name_rmt->valueblk = 0;
                args->rmtblkno = 1;
                args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+               args->rmtvaluelen = args->valuelen;
        }
        xfs_trans_log_buf(args->trans, bp,
             XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -2167,11 +2168,11 @@ xfs_attr3_leaf_lookup_int(
                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
                                continue;
                        args->index = probe;
-                       args->valuelen = be32_to_cpu(name_rmt->valuelen);
+                       args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
                        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
                        args->rmtblkcnt = xfs_attr3_rmt_blocks(
                                                        args->dp->i_mount,
-                                                       args->valuelen);
+                                                       args->rmtvaluelen);
                        return XFS_ERROR(EEXIST);
                }
        }
@@ -2220,19 +2221,19 @@ xfs_attr3_leaf_getvalue(
                name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
                ASSERT(name_rmt->namelen == args->namelen);
                ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
-               valuelen = be32_to_cpu(name_rmt->valuelen);
+               args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
                args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
                args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
-                                                      valuelen);
+                                                      args->rmtvaluelen);
                if (args->flags & ATTR_KERNOVAL) {
-                       args->valuelen = valuelen;
+                       args->valuelen = args->rmtvaluelen;
                        return 0;
                }
-               if (args->valuelen < valuelen) {
-                       args->valuelen = valuelen;
+               if (args->valuelen < args->rmtvaluelen) {
+                       args->valuelen = args->rmtvaluelen;
                        return XFS_ERROR(ERANGE);
                }
-               args->valuelen = valuelen;
+               args->valuelen = args->rmtvaluelen;
        }
        return 0;
 }
@@ -2519,7 +2520,7 @@ xfs_attr3_leaf_clearflag(
                ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
                name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
-               name_rmt->valuelen = cpu_to_be32(args->valuelen);
+               name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
                xfs_trans_log_buf(args->trans, bp,
                         XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
        }
@@ -2677,7 +2678,7 @@ xfs_attr3_leaf_flipflags(
                ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
                name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
-               name_rmt->valuelen = cpu_to_be32(args->valuelen);
+               name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
                xfs_trans_log_buf(args->trans, bp1,
                         XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
        }
index 01db96f60cf05abf1f42e20252cdff4de0ebea0d..833fe5d98d806783ca6bb6cf2f4ecb83424bfdca 100644 (file)
@@ -447,6 +447,7 @@ xfs_attr3_leaf_list_int(
                                args.dp = context->dp;
                                args.whichfork = XFS_ATTR_FORK;
                                args.valuelen = valuelen;
+                               args.rmtvaluelen = valuelen;
                                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
                                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
                                args.rmtblkcnt = xfs_attr3_rmt_blocks(
index 6e37823e2932aeb45d112b55010ab627adb5bde2..d2e6e948cec7be3013b853033b6e1a64b365a682 100644 (file)
@@ -337,7 +337,7 @@ xfs_attr_rmtval_get(
        struct xfs_buf          *bp;
        xfs_dablk_t             lblkno = args->rmtblkno;
        __uint8_t               *dst = args->value;
-       int                     valuelen = args->valuelen;
+       int                     valuelen;
        int                     nmap;
        int                     error;
        int                     blkcnt = args->rmtblkcnt;
@@ -347,7 +347,9 @@ xfs_attr_rmtval_get(
        trace_xfs_attr_rmtval_get(args);
 
        ASSERT(!(args->flags & ATTR_KERNOVAL));
+       ASSERT(args->rmtvaluelen == args->valuelen);
 
+       valuelen = args->rmtvaluelen;
        while (valuelen > 0) {
                nmap = ATTR_RMTVALUE_MAPSIZE;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
@@ -415,7 +417,7 @@ xfs_attr_rmtval_set(
         * attributes have headers, we can't just do a straight byte to FSB
         * conversion and have to take the header space into account.
         */
-       blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+       blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
        error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
                                                   XFS_ATTR_FORK);
        if (error)
@@ -480,7 +482,7 @@ xfs_attr_rmtval_set(
         */
        lblkno = args->rmtblkno;
        blkcnt = args->rmtblkcnt;
-       valuelen = args->valuelen;
+       valuelen = args->rmtvaluelen;
        while (valuelen > 0) {
                struct xfs_buf  *bp;
                xfs_daddr_t     dblkno;
index 6e95ea79f5d73aa2447fc49eb0e4c7e9bb73a25d..201c6091d26abfa2e38dd17fd960e3efcb69bcd7 100644 (file)
@@ -60,10 +60,12 @@ typedef struct xfs_da_args {
        int             index;          /* index of attr of interest in blk */
        xfs_dablk_t     rmtblkno;       /* remote attr value starting blkno */
        int             rmtblkcnt;      /* remote attr value block count */
+       int             rmtvaluelen;    /* remote attr value length in bytes */
        xfs_dablk_t     blkno2;         /* blkno of 2nd attr leaf of interest */
        int             index2;         /* index of 2nd attr in blk */
        xfs_dablk_t     rmtblkno2;      /* remote attr value starting blkno */
        int             rmtblkcnt2;     /* remote attr value block count */
+       int             rmtvaluelen2;   /* remote attr value length in bytes */
        int             op_flags;       /* operation flags */
        enum xfs_dacmp  cmpresult;      /* name compare result for lookups */
 } xfs_da_args_t;
index 1399e187d425dc7af0f8b5062afaf312fe5a0927..753e467aa1a5991d0175087284ff9cde35591c40 100644 (file)
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 const struct export_operations xfs_export_operations = {
index b1c489c1fb2e4496e8ca7d61955e32779d16e439..500c3f0656d0a27676955c7cfc757291fbee3d5d 100644 (file)
@@ -155,7 +155,7 @@ xfs_dir_fsync(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 STATIC int
@@ -288,7 +288,7 @@ xfs_file_read_iter(
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
                if (inode->i_mapping->nrpages) {
-                       ret = -filemap_write_and_wait_range(
+                       ret = filemap_write_and_wait_range(
                                                        VFS_I(ip)->i_mapping,
                                                        pos, -1);
                        if (ret) {
@@ -772,7 +772,7 @@ xfs_file_fallocate(
                unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 
                if (offset & blksize_mask || len & blksize_mask) {
-                       error = -EINVAL;
+                       error = EINVAL;
                        goto out_unlock;
                }
 
@@ -781,7 +781,7 @@ xfs_file_fallocate(
                 * in which case it is effectively a truncate operation
                 */
                if (offset + len >= i_size_read(inode)) {
-                       error = -EINVAL;
+                       error = EINVAL;
                        goto out_unlock;
                }
 
index ef1ca010f417713358c0d0f3869189121e2f0ff0..36d630319a2784c7fe39f83fc9d66b74ee3add7b 100644 (file)
@@ -72,8 +72,8 @@ xfs_initxattrs(
        int                     error = 0;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               error = xfs_attr_set(ip, xattr->name, xattr->value,
-                                    xattr->value_len, ATTR_SECURE);
+               error = -xfs_attr_set(ip, xattr->name, xattr->value,
+                                     xattr->value_len, ATTR_SECURE);
                if (error < 0)
                        break;
        }
@@ -93,8 +93,8 @@ xfs_init_security(
        struct inode    *dir,
        const struct qstr *qstr)
 {
-       return security_inode_init_security(inode, dir, qstr,
-                                           &xfs_initxattrs, NULL);
+       return -security_inode_init_security(inode, dir, qstr,
+                                            &xfs_initxattrs, NULL);
 }
 
 static void
@@ -124,15 +124,15 @@ xfs_cleanup_inode(
        xfs_dentry_to_name(&teardown, dentry, 0);
 
        xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
-       iput(inode);
 }
 
 STATIC int
-xfs_vn_mknod(
+xfs_generic_create(
        struct inode    *dir,
        struct dentry   *dentry,
        umode_t         mode,
-       dev_t           rdev)
+       dev_t           rdev,
+       bool            tmpfile)        /* unnamed file */
 {
        struct inode    *inode;
        struct xfs_inode *ip = NULL;
@@ -156,8 +156,12 @@ xfs_vn_mknod(
        if (error)
                return error;
 
-       xfs_dentry_to_name(&name, dentry, mode);
-       error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+       if (!tmpfile) {
+               xfs_dentry_to_name(&name, dentry, mode);
+               error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+       } else {
+               error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+       }
        if (unlikely(error))
                goto out_free_acl;
 
@@ -169,18 +173,22 @@ xfs_vn_mknod(
 
 #ifdef CONFIG_XFS_POSIX_ACL
        if (default_acl) {
-               error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+               error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
                        goto out_cleanup_inode;
        }
        if (acl) {
-               error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+               error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
                        goto out_cleanup_inode;
        }
 #endif
 
-       d_instantiate(dentry, inode);
+       if (tmpfile)
+               d_tmpfile(dentry, inode);
+       else
+               d_instantiate(dentry, inode);
+
  out_free_acl:
        if (default_acl)
                posix_acl_release(default_acl);
@@ -189,10 +197,22 @@ xfs_vn_mknod(
        return -error;
 
  out_cleanup_inode:
-       xfs_cleanup_inode(dir, inode, dentry);
+       if (!tmpfile)
+               xfs_cleanup_inode(dir, inode, dentry);
+       iput(inode);
        goto out_free_acl;
 }
 
+STATIC int
+xfs_vn_mknod(
+       struct inode    *dir,
+       struct dentry   *dentry,
+       umode_t         mode,
+       dev_t           rdev)
+{
+       return xfs_generic_create(dir, dentry, mode, rdev, false);
+}
+
 STATIC int
 xfs_vn_create(
        struct inode    *dir,
@@ -353,6 +373,7 @@ xfs_vn_symlink(
 
  out_cleanup_inode:
        xfs_cleanup_inode(dir, inode, dentry);
+       iput(inode);
  out:
        return -error;
 }
@@ -1053,25 +1074,7 @@ xfs_vn_tmpfile(
        struct dentry   *dentry,
        umode_t         mode)
 {
-       int                     error;
-       struct xfs_inode        *ip;
-       struct inode            *inode;
-
-       error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
-       if (unlikely(error))
-               return -error;
-
-       inode = VFS_I(ip);
-
-       error = xfs_init_security(inode, dir, &dentry->d_name);
-       if (unlikely(error)) {
-               iput(inode);
-               return -error;
-       }
-
-       d_tmpfile(dentry, inode);
-
-       return 0;
+       return xfs_generic_create(dir, dentry, mode, 0, true);
 }
 
 static const struct inode_operations xfs_inode_operations = {
index 08624dc67317185b1e044fd7d67ef4573d941ef6..a5f8bd9899d37a811af38a616da44f66d73d399f 100644 (file)
@@ -616,11 +616,13 @@ xfs_log_mount(
        int             error = 0;
        int             min_logfsbs;
 
-       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
-               xfs_notice(mp, "Mounting Filesystem");
-       else {
+       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
+               xfs_notice(mp, "Mounting V%d Filesystem",
+                          XFS_SB_VERSION_NUM(&mp->m_sb));
+       } else {
                xfs_notice(mp,
-"Mounting filesystem in no-recovery mode.  Filesystem will be inconsistent.");
+"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
+                          XFS_SB_VERSION_NUM(&mp->m_sb));
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }
 
index 993cb19e7d390e03220f265e9ba97e0f0c8dc7fa..944f3d9456a8b4f6f0fe44721fe98f9f52c3bd69 100644 (file)
@@ -743,8 +743,6 @@ xfs_mountfs(
                new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
                if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
                        mp->m_inode_cluster_size = new_size;
-               xfs_info(mp, "Using inode cluster size of %d bytes",
-                        mp->m_inode_cluster_size);
        }
 
        /*
index 348e4d2ed6e6e9621b82c535212e32093b51da0c..dc977b6e6a365a4222fb79b212f5c2b23b46b84c 100644 (file)
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
 
        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
-       if ((error = list_lru_init(&qinf->qi_lru))) {
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = -list_lru_init(&qinf->qi_lru);
+       if (error)
+               goto out_free_qinf;
 
        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
-       if ((error = xfs_qm_init_quotainos(mp))) {
-               list_lru_destroy(&qinf->qi_lru);
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = xfs_qm_init_quotainos(mp);
+       if (error)
+               goto out_free_lru;
 
        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+
                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
        qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
+
+out_free_lru:
+       list_lru_destroy(&qinf->qi_lru);
+out_free_qinf:
+       kmem_free(qinf);
+       mp->m_quotainfo = NULL;
+       return error;
 }
 
 
index 0c0e41bbe4e369d7bf5267ed906e59159a4e2ff7..8baf61afae1ddf132757caad16c359ec447dcc39 100644 (file)
@@ -201,10 +201,6 @@ xfs_mount_validate_sb(
         * write validation, we don't need to check feature masks.
         */
        if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
-               xfs_alert(mp,
-"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
-"Use of these features in this kernel is at your own risk!");
-
                if (xfs_sb_has_compat_feature(sbp,
                                        XFS_SB_FEAT_COMPAT_UNKNOWN)) {
                        xfs_warn(mp,
index 2053767763773b60d7b7f8aca31f3b31a9219785..3494eff8e4ebfbce7256fd8d95769e2a1799619c 100644 (file)
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_fsname;
 
-       error = xfs_init_mount_workqueues(mp);
+       error = -xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;
 
-       error = xfs_icsb_init_counters(mp);
+       error = -xfs_icsb_init_counters(mp);
        if (error)
                goto out_destroy_workqueues;
 
index b4ea8f50fc65ed409335bf37b3088dfde626d4a2..5e752b9590548448b42080151169cc7e38a1245c 100644 (file)
@@ -12,7 +12,7 @@
        [RLIMIT_CPU]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_FSIZE]          = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_DATA]           = {  RLIM_INFINITY,  RLIM_INFINITY },   \
-       [RLIMIT_STACK]          = {       _STK_LIM,   _STK_LIM_MAX },   \
+       [RLIMIT_STACK]          = {       _STK_LIM,  RLIM_INFINITY },   \
        [RLIMIT_CORE]           = {              0,  RLIM_INFINITY },   \
        [RLIMIT_RSS]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_NPROC]          = {              0,              0 },   \
index 49376aec2fbb8a9e054b9605fd9de36e4cd39c5f..6dfd64b3a6042d34176e85f25e73ed477a7c600e 100644 (file)
        {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index 940ece4934bab0e0c17f491b28da059454967621..012d58fa8ff0ebb69812e2b863e6dee6a2ae6c01 100644 (file)
        INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
-       INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
-       INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+       INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
+       INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
index 63b5eff0a80f647ffc0c6bdb6064ac56d482fc34..fdd7e1b61f60d7d3db83211838d6225a6091c5fc 100644 (file)
@@ -47,6 +47,7 @@ struct amba_driver {
 enum amba_vendor {
        AMBA_VENDOR_ARM = 0x41,
        AMBA_VENDOR_ST = 0x80,
+       AMBA_VENDOR_QCOM = 0x51,
 };
 
 extern struct bus_type amba_bustype;
index c2515851c1aa2038c5298c8ab3288d469cd3bb5b..d60904b9e50532410af2956c3546fa5072cae08e 100644 (file)
@@ -473,6 +473,7 @@ struct cftype {
 };
 
 extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
 
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
        return task_css_check(task, subsys_id, false);
 }
 
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+       return task_css_check(task, subsys_id, true) ==
+               init_css_set.subsys[subsys_id];
+}
+
 static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
 {
index 8300fb87b84ac1329d8facb1d560767c29c549d8..72cb0ddb9678d21eb2f2edd20a36b594b72160d9 100644 (file)
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+       u8 map_cnt;
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
index 7c8b20b120eac680f27e3cd1e99630b1fe521799..a9a53b12397b0c4fd29a11cbf5f9cbbd18944fd8 100644 (file)
@@ -56,6 +56,7 @@ struct macvlan_dev {
        int                     numqueues;
        netdev_features_t       tap_features;
        int                     minor;
+       int                     nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
index 13bbbde00e68de454c8cf30f0581796d55eb0a40..b2acc4a1b13c9bd906869bc52d501f53f67d3c87 100644 (file)
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
+       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
 
 extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                 */
                skb->protocol = htons(ETH_P_802_2);
 }
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index 97ac926c78a707fb6bf45293d00e8f4d86515f43..051c85032f481ae8fa1fb5ca6542df83588a9153 100644 (file)
@@ -272,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
        return -EINVAL;
 }
 
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return 0;
+}
+
 static inline int irq_can_set_affinity(unsigned int irq)
 {
        return 0;
index b0122dc6f96a0a21324f86b5a28c725ac3ea74cf..ca1be5c9136c4557de511143b4c9b36e1eec3734 100644 (file)
@@ -50,7 +50,24 @@ enum kernfs_node_flag {
 
 /* @flags for kernfs_create_root() */
 enum kernfs_root_flag {
-       KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+       /*
+        * kernfs_nodes are created in the deactivated state and invisible.
+        * They require explicit kernfs_activate() to become visible.  This
+        * can be used to make related nodes become visible atomically
+        * after all nodes are created successfully.
+        */
+       KERNFS_ROOT_CREATE_DEACTIVATED          = 0x0001,
+
+       /*
+        * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2)
+        * succeeds regardless of the RW permissions.  sysfs had an extra
+        * layer of enforcement where open(2) fails with -EACCES regardless
+        * of CAP_DAC_OVERRIDE if the permission doesn't have the
+        * respective read or write access at all (none of S_IRUGO or
+        * S_IWUGO) or the respective operation isn't implemented.  The
+        * following flag enables that behavior.
+        */
+       KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK       = 0x0002,
 };
 
 /* type-specific structures for kernfs_node union members */
index 34a513a2727bbe83adff047613a1ad3458684ac2..a6a42dd024661324dbeed5b9cfaa028744bae154 100644 (file)
@@ -12,9 +12,9 @@
 #endif
 
 #ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C" __visible
+#define CPP_ASMLINKAGE extern "C"
 #else
-#define CPP_ASMLINKAGE __visible
+#define CPP_ASMLINKAGE
 #endif
 
 #ifndef asmlinkage
index 7c36cc55d2c79b8fe90cdec3898751ac4988165f..443176ee1ab04e1f9d2788b51d700eb2a913610c 100644 (file)
@@ -45,7 +45,6 @@ struct platform_device;
 struct rtsx_slot {
        struct platform_device  *p_dev;
        void                    (*card_event)(struct platform_device *p_dev);
-       void                    (*done_transfer)(struct platform_device *p_dev);
 };
 
 #endif
index 8d6bbd609ad9b6142357b0203bf7e0250e923ae4..a3835976f7c639e8f24e17cf2df6dc495f07fae4 100644 (file)
@@ -943,12 +943,6 @@ void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
                int num_sg, bool read, int timeout);
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int sg_count, bool read);
 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
index b66e7610d4eec9f4d67e5f8bbd745bb6cbd3c99a..7040dc98ff8baa59118cd42dd728d3152afb7c86 100644 (file)
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
        __be32                  byte_count;
 };
 
+enum mlx4_update_qp_attr {
+       MLX4_UPDATE_QP_SMAC             = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+       u8      smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
index 94734a6259a4d9ee36e25b342c86d3c1bf9d5fcf..17d83393afcc4337d50f00cfa837030d9429c6f8 100644 (file)
@@ -248,24 +248,17 @@ do {                                                              \
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
 
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
-               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
 #define net_get_random_once(buf, nbytes)                               \
        ({                                                              \
                bool ___ret = false;                                    \
                static bool ___done = false;                            \
-               static struct static_key ___done_key =                  \
-                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
-               if (!static_key_true(&___done_key))                     \
+               static struct static_key ___once_key =                  \
+                       STATIC_KEY_INIT_TRUE;                           \
+               if (static_key_true(&___once_key))                      \
                        ___ret = __net_get_random_once(buf,             \
                                                       nbytes,          \
                                                       &___done,        \
-                                                      &___done_key);   \
+                                                      &___once_key);   \
                ___ret;                                                 \
        })
 
index 7ed3a3aa6604b7b8511ea709ea2c2312a1d38bb2..b42d07b0390b2e95c3e551bb6639b906472bf70b 100644 (file)
@@ -1144,6 +1144,7 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+       spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3050,9 +3056,18 @@ extern int               weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index 3bad8d106e0ea01b2ae7524e216c83b4e654ced3..e6f0988c1c68a18dbc96c804d5d1c01c0aa6d9fb 100644 (file)
@@ -349,7 +349,7 @@ int of_device_is_stdout_path(struct device_node *dn);
 
 #else /* CONFIG_OF */
 
-static inline const char* of_node_full_name(struct device_node *np)
+static inline const char* of_node_full_name(const struct device_node *np)
 {
        return "<no-node>";
 }
index 6fe8464ed767f0dac6481a6ffc7b39ecaebdd648..881a7c3571f4617b99e0845995800d98eb4f9349 100644 (file)
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
index 41a13e70f41f5ade5228404ec6557ea2be7d113c..7944cdc27bed63df0f8b9cde11f1611af2fdd859 100644 (file)
@@ -10,7 +10,7 @@
 
 struct dma_chan;
 
-#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
 bool omap_dma_filter_fn(struct dma_chan *, void *);
 #else
 static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
index 3356abcfff184e707eccb08d6f99042a8fd51acc..3ef6ea12806a297107ff65b2ad554f5176c8e301 100644 (file)
@@ -402,6 +402,8 @@ struct perf_event {
 
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
+       unsigned long                   rcu_batches;
+       int                             rcu_pending;
 
        /* poll related */
        wait_queue_head_t               waitq;
index 8e3e66ac0a5215d221042e15e631fb2fe2fb51d1..953937ea5233c770d631bf3ae59be60b72630d88 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/wait.h>
 #include <uapi/linux/rtnetlink.h>
 
 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
 #else
index 25f54c79f75772a9f133c585e17a2d8e4a59e8ac..221b2bde372363765b5328638bf9320d9c95f5fd 100644 (file)
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED            512
 #define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
-        * exit the critical section).
+        * exit the critical section);
+        *
+        * @dl_yielded tells if task gave up the cpu before consuming
+        * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted;
+       int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index f3539a15c41103b743c0571913fbb93dc402f40a..f856e5a746fae66e2c91357cfe94fc89d0acae18 100644 (file)
@@ -3668,6 +3668,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
index 6c4f5eac98e7be133af4b868507f0d217ac05c1b..216cecce65e9e1200cd760de64c27ac852ffc85b 100644 (file)
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 
 
 /*
index 80f500a29498e1fc9b8892e5c66be6bd02362eaa..b2704fd0ec80d714bc9e7dd9b87721da1853173c 100644 (file)
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
 
index ed0b2c599a64f7d701117bf54ba6a4dcd56edb31..7c5cbfe3fc49d6761bccc004c9475b16fc2a3dbe 100644 (file)
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
        TP_fast_assign(
                __entry->ip     = ip;
-               __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
+               __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
                __assign_str(name, mod->name);
        ),
 
index f863428796d532a42df39e45527da2f8c6eb9c88..c6d10af50123e1c5810aff634400e8c273949221 100644 (file)
 # define RLIM_INFINITY         (~0UL)
 #endif
 
-/*
- * RLIMIT_STACK default maximum - some architectures override it:
- */
-#ifndef _STK_LIM_MAX
-# define _STK_LIM_MAX          RLIM_INFINITY
-#endif
-
 
 #endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
index 6db66783d268d9a286a86b836c9277bd89f5ac13..3336406080874bf2bd06cb1f0563aea6d8e56650 100644 (file)
@@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module)
 __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 #define __NR_sched_getattr 275
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 276
+#define __NR_syscalls 277
 
 /*
  * All syscalls below here should go away really,
index 11917f747cb401be5b7dc8ab788fa5d0d4e8e47c..1b1efddb91cd924140fd5ad3009f338b31925e05 100644 (file)
@@ -331,9 +331,17 @@ enum {
 #define AUDIT_FAIL_PRINTK      1
 #define AUDIT_FAIL_PANIC       2
 
+/*
+ * These bits disambiguate different calling conventions that share an
+ * ELF machine type, bitness, and endianness
+ */
+#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+
 /* distinguish syscall tables */
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE           0x40000000
+
 #define AUDIT_ARCH_ALPHA       (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
@@ -346,7 +354,11 @@ enum {
 #define AUDIT_ARCH_MIPS                (EM_MIPS)
 #define AUDIT_ARCH_MIPSEL      (EM_MIPS|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_MIPS64      (EM_MIPS|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_MIPS64N32   (EM_MIPS|__AUDIT_ARCH_64BIT|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64    (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC    (EM_OPENRISC)
 #define AUDIT_ARCH_PARISC      (EM_PARISC)
 #define AUDIT_ARCH_PARISC64    (EM_PARISC|__AUDIT_ARCH_64BIT)
index 1ba9d626aa833db91c462560f27054b30e91939d..194c1eab04d8ad9cb37ae855d8c011d48722c829 100644 (file)
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ *     here to reserve the value for API/ABI compatibility)
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
index 9c7fd4c9249f2c72395fcaf2ac953f782a3e2b59..48655ceb66f45cdf27cf8bf769d9de296175a21c 100644 (file)
@@ -476,7 +476,7 @@ static void __init mm_init(void)
        vmalloc_init();
 }
 
-asmlinkage void __init start_kernel(void)
+asmlinkage __visible void __init start_kernel(void)
 {
        char * command_line;
        extern const struct kernel_param __start___param[], __stop___param[];
index 9fcdaa705b6cb7442b4babcb3a9ab40564afa27b..3f1ca934a2378495e5129dbe807bfc7111f53e8b 100644 (file)
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
         */
        if (!use_task_css_set_links)
                cgroup_enable_task_cg_lists();
-retry:
+
        mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
@@ -1503,7 +1503,7 @@ retry:
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;
-
+retry:
        /* look for a matching existing root */
        if (!opts.subsys_mask && !opts.none && !opts.name) {
                cgrp_dfl_root_visible = true;
@@ -1562,9 +1562,9 @@ retry:
                if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        mutex_unlock(&cgroup_tree_mutex);
-                       kfree(opts.release_agent);
-                       kfree(opts.name);
                        msleep(10);
+                       mutex_lock(&cgroup_tree_mutex);
+                       mutex_lock(&cgroup_mutex);
                        goto retry;
                }
 
index 2bc4a2256444ebf500269c5dc276871e48e0efa8..345628c78b5b3779460038ec6f036f9e8b7c1a32 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
 struct freezer {
        struct cgroup_subsys_state      css;
        unsigned int                    state;
-       spinlock_t                      lock;
 };
 
+static DEFINE_MUTEX(freezer_mutex);
+
 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
        return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
        if (!freezer)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&freezer->lock);
        return &freezer->css;
 }
 
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
        struct freezer *freezer = css_freezer(css);
        struct freezer *parent = parent_freezer(freezer);
 
-       /*
-        * The following double locking and freezing state inheritance
-        * guarantee that @cgroup can never escape ancestors' freezing
-        * states.  See css_for_each_descendant_pre() for details.
-        */
-       if (parent)
-               spin_lock_irq(&parent->lock);
-       spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+       mutex_lock(&freezer_mutex);
 
        freezer->state |= CGROUP_FREEZER_ONLINE;
 
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
                atomic_inc(&system_freezing_cnt);
        }
 
-       spin_unlock(&freezer->lock);
-       if (parent)
-               spin_unlock_irq(&parent->lock);
-
+       mutex_unlock(&freezer_mutex);
        return 0;
 }
 
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
        struct freezer *freezer = css_freezer(css);
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        if (freezer->state & CGROUP_FREEZING)
                atomic_dec(&system_freezing_cnt);
 
        freezer->state = 0;
 
-       spin_unlock_irq(&freezer->lock);
+       mutex_unlock(&freezer_mutex);
 }
 
 static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
        struct task_struct *task;
        bool clear_frozen = false;
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        /*
         * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
                }
        }
 
-       spin_unlock_irq(&freezer->lock);
-
-       /*
-        * Propagate FROZEN clearing upwards.  We may race with
-        * update_if_frozen(), but as long as both work bottom-up, either
-        * update_if_frozen() sees child's FROZEN cleared or we clear the
-        * parent's FROZEN later.  No parent w/ !FROZEN children can be
-        * left FROZEN.
-        */
+       /* propagate FROZEN clearing upwards */
        while (clear_frozen && (freezer = parent_freezer(freezer))) {
-               spin_lock_irq(&freezer->lock);
                freezer->state &= ~CGROUP_FROZEN;
                clear_frozen = freezer->state & CGROUP_FREEZING;
-               spin_unlock_irq(&freezer->lock);
        }
+
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
 {
        struct freezer *freezer;
 
-       rcu_read_lock();
-       freezer = task_freezer(task);
-
        /*
         * The root cgroup is non-freezable, so we can skip locking the
         * freezer.  This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
         * to do.  If we lost and root is the new cgroup, noop is still the
         * right thing to do.
         */
-       if (!parent_freezer(freezer))
-               goto out;
+       if (task_css_is_root(task, freezer_cgrp_id))
+               return;
 
-       /*
-        * Grab @freezer->lock and freeze @task after verifying @task still
-        * belongs to @freezer and it's freezing.  The former is for the
-        * case where we have raced against task migration and lost and
-        * @task is already in a different cgroup which may not be frozen.
-        * This isn't strictly necessary as freeze_task() is allowed to be
-        * called spuriously but let's do it anyway for, if nothing else,
-        * documentation.
-        */
-       spin_lock_irq(&freezer->lock);
-       if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+       mutex_lock(&freezer_mutex);
+       rcu_read_lock();
+
+       freezer = task_freezer(task);
+       if (freezer->state & CGROUP_FREEZING)
                freeze_task(task);
-       spin_unlock_irq(&freezer->lock);
-out:
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        struct css_task_iter it;
        struct task_struct *task;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
-
-       spin_lock_irq(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZING) ||
            (freezer->state & CGROUP_FROZEN))
-               goto out_unlock;
+               return;
 
        /* are all (live) children frozen? */
+       rcu_read_lock();
        css_for_each_child(pos, css) {
                struct freezer *child = css_freezer(pos);
 
                if ((child->state & CGROUP_FREEZER_ONLINE) &&
-                   !(child->state & CGROUP_FROZEN))
-                       goto out_unlock;
+                   !(child->state & CGROUP_FROZEN)) {
+                       rcu_read_unlock();
+                       return;
+               }
        }
+       rcu_read_unlock();
 
        /* are all tasks frozen? */
        css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        freezer->state |= CGROUP_FROZEN;
 out_iter_end:
        css_task_iter_end(&it);
-out_unlock:
-       spin_unlock_irq(&freezer->lock);
 }
 
 static int freezer_read(struct seq_file *m, void *v)
 {
        struct cgroup_subsys_state *css = seq_css(m), *pos;
 
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
 
        /* update states bottom-up */
-       css_for_each_descendant_post(pos, css)
+       css_for_each_descendant_post(pos, css) {
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
+
                update_if_frozen(pos);
 
+               rcu_read_lock();
+               css_put(pos);
+       }
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 
        seq_puts(m, freezer_state_strs(css_freezer(css)->state));
        seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
                                unsigned int state)
 {
        /* also synchronizes against task migration, see freezer_attach() */
-       lockdep_assert_held(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZER_ONLINE))
                return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
         * descendant will try to inherit its parent's FREEZING state as
         * CGROUP_FREEZING_PARENT.
         */
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
        css_for_each_descendant_pre(pos, &freezer->css) {
                struct freezer *pos_f = css_freezer(pos);
                struct freezer *parent = parent_freezer(pos_f);
 
-               spin_lock_irq(&pos_f->lock);
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
 
-               if (pos_f == freezer) {
+               if (pos_f == freezer)
                        freezer_apply_state(pos_f, freeze,
                                            CGROUP_FREEZING_SELF);
-               } else {
-                       /*
-                        * Our update to @parent->state is already visible
-                        * which is all we need.  No need to lock @parent.
-                        * For more info on synchronization, see
-                        * freezer_post_create().
-                        */
+               else
                        freezer_apply_state(pos_f,
                                            parent->state & CGROUP_FREEZING,
                                            CGROUP_FREEZING_PARENT);
-               }
 
-               spin_unlock_irq(&pos_f->lock);
+               rcu_read_lock();
+               css_put(pos);
        }
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
index 6cb20d2e7ee0d28b0d6399a8cd1f3e23f9366899..019d45008448cc54160fd25edd5bbde1cc279e50 100644 (file)
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
        enum ctx_state prev_ctx;
 
index f83a71a3e46d75547e540ed317f99531838f408b..440eefc67397e48f15b58bc8cf31712bec91286b 100644 (file)
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+       struct perf_event *event;
+       bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-       struct perf_event *event = info;
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
+       if (re->detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
                ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
+       struct remove_event re = {
+               .event = event,
+               .detach_group = detach_group,
+       };
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               cpu_function_call(event->cpu, __perf_remove_from_context, event);
+               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }
 
 retry:
-       if (!task_function_call(task, __perf_remove_from_context, event))
+       if (!task_function_call(task, __perf_remove_from_context, &re))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
         * Since the task isn't running, its safe to remove the event, us
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
+       if (detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
        unaccount_event(event);
 
        if (event->rb) {
-               struct ring_buffer *rb;
-
                /*
                 * Can happen when we close an event with re-directed output.
                 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
                 * over us; possibly making our ring_buffer_put() the last.
                 */
                mutex_lock(&event->mmap_mutex);
-               rb = event->rb;
-               if (rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* could be last */
-               }
+               ring_buffer_attach(event, NULL);
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
         *     to trigger the AB-BA case.
         */
        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-       raw_spin_lock_irq(&ctx->lock);
-       perf_group_detach(event);
-       raw_spin_unlock_irq(&ctx->lock);
-       perf_remove_from_context(event);
+       perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
        free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb)
 {
+       struct ring_buffer *old_rb = NULL;
        unsigned long flags;
 
-       if (!list_empty(&event->rb_entry))
-               return;
+       if (event->rb) {
+               /*
+                * Should be impossible, we set this when removing
+                * event->rb_entry and wait/clear when adding event->rb_entry.
+                */
+               WARN_ON_ONCE(event->rcu_pending);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       if (list_empty(&event->rb_entry))
-               list_add(&event->rb_entry, &rb->event_list);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+               old_rb = event->rb;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-       unsigned long flags;
+               spin_lock_irqsave(&old_rb->event_lock, flags);
+               list_del_rcu(&event->rb_entry);
+               spin_unlock_irqrestore(&old_rb->event_lock, flags);
+       }
 
-       if (list_empty(&event->rb_entry))
-               return;
+       if (event->rcu_pending && rb) {
+               cond_synchronize_rcu(event->rcu_batches);
+               event->rcu_pending = 0;
+       }
+
+       if (rb) {
+               spin_lock_irqsave(&rb->event_lock, flags);
+               list_add_rcu(&event->rb_entry, &rb->event_list);
+               spin_unlock_irqrestore(&rb->event_lock, flags);
+       }
+
+       rcu_assign_pointer(event->rb, rb);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_del_init(&event->rb_entry);
-       wake_up_all(&event->waitq);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb, so that we
+                * could attach the new rb, we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-               return;
+               goto out_put;
 
-       /* Detach current event from the buffer. */
-       rcu_assign_pointer(event->rb, NULL);
-       ring_buffer_detach(event, rb);
+       ring_buffer_attach(event, NULL);
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count)) {
-               ring_buffer_put(rb); /* can't be last */
-               return;
-       }
+       if (atomic_read(&rb->mmap_count))
+               goto out_put;
 
        /*
         * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
                 * still restart the iteration to make sure we're not now
                 * iterating the wrong list.
                 */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
+               if (event->rb == rb)
+                       ring_buffer_attach(event, NULL);
+
                mutex_unlock(&event->mmap_mutex);
                put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
        vma->vm_mm->pinned_vm -= mmap_locked;
        free_uid(mmap_user);
 
+out_put:
        ring_buffer_put(rb); /* could be last */
 }
 
@@ -4124,7 +4143,6 @@ again:
        vma->vm_mm->pinned_vm += extra;
 
        ring_buffer_attach(event, rb);
-       rcu_assign_pointer(event->rb, rb);
 
        perf_event_init_userpage(event);
        perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Keeps track of cpu being initialized/exited */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL, *old_rb = NULL;
+       struct ring_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -6942,8 +6969,6 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
-       old_rb = event->rb;
-
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
                        goto unlock;
        }
 
-       if (old_rb)
-               ring_buffer_detach(event, old_rb);
-
-       if (rb)
-               ring_buffer_attach(event, rb);
-
-       rcu_assign_pointer(event->rb, rb);
-
-       if (old_rb) {
-               ring_buffer_put(old_rb);
-               /*
-                * Since we detached before setting the new rb, so that we
-                * could attach the new rb, we could have missed a wakeup.
-                * Provide it now.
-                */
-               wake_up_all(&event->waitq);
-       }
+       ring_buffer_attach(event, rb);
 
        ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
+       } else {
+               if (attr.sample_period & (1ULL << 63))
+                       return -EINVAL;
        }
 
        /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader, false);
 
                /*
                 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling);
+                       perf_remove_from_context(sibling, false);
                        perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock(&src_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event);
+               perf_remove_from_context(event, false);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       if (child_event->parent) {
-               raw_spin_lock_irq(&child_ctx->lock);
-               perf_group_detach(child_event);
-               raw_spin_unlock_irq(&child_ctx->lock);
-       }
-
-       perf_remove_from_context(child_event);
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
+       if (!parent_ctx)
+               return 0;
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+       struct remove_event re = { .detach_group = false };
        struct perf_event_context *ctx = __info;
-       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(event);
+       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(&re);
        rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
        perf_event_exit_cpu_context(cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
 }
index 6b715c0af1b117b5b61bd32629a00845f0313557..e0501fe7140d7c97daba3f3438b51272cf6d9932 100644 (file)
@@ -990,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);
 
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
        if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, new_base->get_time());
+               tim = ktime_add_safe(tim, base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
@@ -1009,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
index c8380ad203bcd5fe8e78bf9570725730733791f0..28c57069ef681f726f2df133d36e6319d370c25f 100644 (file)
@@ -1683,6 +1683,14 @@ int kernel_kexec(void)
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();
+
+               /*
+                * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+                * no further code needs to use CPU hotplug (which is true in
+                * the reboot case). However, the kexec path depends on using
+                * CPU hotplug again; so re-enable it here.
+                */
+               cpu_hotplug_enable();
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }
index b0e9467922e1a476bfe1d4d8503ac7623affcaea..d24e4339b46d3c84d03f6998c44bc14035054aa6 100644 (file)
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
        struct task_struct *curr = current;
 
index 18fb7a2fb14b315cf3f7f9a7bdaf0173438d57b5..1ea328aafdc9a7437d01e19b46033f7fd7fe5412 100644 (file)
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
        return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
        unsigned int nr_pages, nr_highmem;
 
index a45b509622952a751fda8f962c143f9ef684aedf..7228258b85eca19e105df60bf3101bbc8a5e30b4 100644 (file)
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
        va_list args;
        int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
        }
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
        va_list ap;
 
index 268a45ea238cc84f51ae7612bf0ba3c531b9887f..204d3d281809aa90686b8d4f0199c7d933710908 100644 (file)
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
        struct rq *rq = this_rq();
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq, prev);
-               if (likely(p && p != RETRY_TASK))
-                       return p;
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
 again:
@@ -2741,7 +2747,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
                blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
 
@@ -2751,7 +2757,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
        /*
         * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2789,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2819,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
        enum ctx_state prev_state;
 
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_throttled = 0;
        dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        ,
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
        };
        SD_INIT_NAME(sd, NUMA);
        sd->private = &tl->data;
index 5b9bb42b2d47e760f3a7cd17af24ba37d2cf07ea..ab001b5d50487815da984947b592b26038cce794 100644 (file)
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-       /*
-        * nothing to do for the moment
-        */
+       free_cpumask_var(cp->free_cpus);
 }
index 8b836b376d9129760066326eabf5040f72b2e4f3..3031bac8aa3ea990bc7425e835675c7a5a386ab5 100644 (file)
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
        int idx = 0;
        int task_pri = convert_prio(p->prio);
 
-       if (task_pri >= MAX_RT_PRIO)
-               return 0;
+       BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
index a95097cb4591b5bfa2466adb5600895e782fc661..72fdf06ef8652d5cb443b080f53ac117bd5517ba 100644 (file)
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputimescaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputimescaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled,       CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
index b08095786cb8fff0c96773eb5f6a582da503bcb8..800e99b99075141421d82f0bdc07e42f09baea9d 100644 (file)
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
-        * new scheduling parameters (thanks to dl_new=1).
+        * new scheduling parameters (thanks to dl_yielded=1).
         */
        if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_new = 1;
+               rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
index 7570dd969c2838e9aab12ba9cb2c24cb87e21855..0fdb96de81a5b8a92c302961769cdefdb5cad915 100644 (file)
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
        int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
+
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_lock(&this_rq->lock);
 
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
                this_rq->next_balance = next_balance;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
-
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
index 33e4648ae0e7cd908671ef1a8ab60bbb562c097c..92f24f5e8d5281aa03cf51e77d26297b91643769 100644 (file)
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ restart:
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
        __u32 pending;
        unsigned long flags;
index ac5b23cf7212c6ebb0045bedce2f13ee8d0a19b3..6620e5837ce2e361e6014caca35632ae48ab67cb 100644 (file)
@@ -188,7 +188,6 @@ static int tracepoint_add_func(struct tracepoint *tp,
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
-       release_probes(old);
 
        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -200,6 +199,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
        rcu_assign_pointer(tp->funcs, tp_funcs);
        if (!static_key_enabled(&tp->key))
                static_key_slow_inc(&tp->key);
+       release_probes(old);
        return 0;
 }
 
@@ -221,7 +221,6 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
-       release_probes(old);
 
        if (!tp_funcs) {
                /* Removed last function */
@@ -232,6 +231,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                        static_key_slow_dec(&tp->key);
        }
        rcu_assign_pointer(tp->funcs, tp_funcs);
+       release_probes(old);
        return 0;
 }
 
index 0ee63af30bd14a4ad7f4b8f846d19b100fd596b3..8edc87185427cb17fa02ed93498fcf6f8301cb7e 100644 (file)
@@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work)
 
        /* mayday mayday mayday */
        if (list_empty(&pwq->mayday_node)) {
+               /*
+                * If @pwq is for an unbound wq, its base ref may be put at
+                * any time due to an attribute change.  Pin @pwq until the
+                * rescuer is done with it.
+                */
+               get_pwq(pwq);
                list_add_tail(&pwq->mayday_node, &wq->maydays);
                wake_up_process(wq->rescuer->task);
        }
@@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer)
        struct worker *rescuer = __rescuer;
        struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
+       bool should_stop;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop()) {
-               __set_current_state(TASK_RUNNING);
-               rescuer->task->flags &= ~PF_WQ_WORKER;
-               return 0;
-       }
+       /*
+        * By the time the rescuer is requested to stop, the workqueue
+        * shouldn't have any work pending, but @wq->maydays may still have
+        * pwq(s) queued.  This can happen by non-rescuer workers consuming
+        * all the work items before the rescuer got to them.  Go through
+        * @wq->maydays processing before acting on should_stop so that the
+        * list is always empty on exit.
+        */
+       should_stop = kthread_should_stop();
 
        /* see whether any pwq is asking for help */
        spin_lock_irq(&wq_mayday_lock);
@@ -2444,6 +2455,12 @@ repeat:
 
                process_scheduled_works(rescuer);
 
+               /*
+                * Put the reference grabbed by send_mayday().  @pool won't
+                * go away while we're holding its lock.
+                */
+               put_pwq(pwq);
+
                /*
                 * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
@@ -2459,6 +2476,12 @@ repeat:
 
        spin_unlock_irq(&wq_mayday_lock);
 
+       if (should_stop) {
+               __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
+               return 0;
+       }
+
        /* rescuers should never participate in concurrency management */
        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
@@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
        if (!pwq) {
                pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
                           wq->name);
-               goto out_unlock;
+               mutex_lock(&wq->mutex);
+               goto use_dfl_pwq;
        }
 
        /*
index f23b63f0a1c391303e6a13e7fb6495a6354aa9c0..6745c6230db3403629048256968443f51b777655 100644 (file)
@@ -23,7 +23,7 @@ static void __dump_stack(void)
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
        int was_locked;
        int old;
@@ -55,7 +55,7 @@ retry:
        preempt_enable();
 }
 #else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
        __dump_stack();
 }
index ebe5880c29d6cbe2306054f19bda87ffb3daa59a..1b5a95f0fa013ca428e877ae39d6f0148a49cd61 100644 (file)
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
 
 config GENERIC_EARLY_IOREMAP
        bool
+
+config MAX_STACK_SIZE_MB
+       int "Maximum user stack size for 32-bit processes (MB)"
+       default 80
+       range 8 256 if METAG
+       range 8 2048
+       depends on STACK_GROWSUP && (!64BIT || COMPAT)
+       help
+         This is the maximum stack size in Megabytes in the VM layout of 32-bit
+         user processes when the stack grows upwards (currently only on parisc
+         and metag arch). The stack will be located at the highest memory
+         address minus the given value, unless the RLIMIT_STACK hard limit is
+         changed to a smaller value in which case that is used.
+
+         A sane initial value is 80 MB.
index 2f724e3cdf24187e11bd48d44c4c4f88d42fe199..7499ef19f1c15f4237b695c23d71414eecd97d3a 100644 (file)
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
        /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+       if (test_bit(AS_ENOSPC, &mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
+       if (test_bit(AS_EIO, &mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
 }
index 91d67eaee0500796c9e5569fedc7cc5775002dda..8d2fcdfeff7fdb319f58c838cd8b94a6cc59121e 100644 (file)
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
        int i;
        unsigned long flags;
 
-       kmemleak_early_log = 0;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
+               kmemleak_early_log = 0;
                kmemleak_disable();
                return;
        }
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
 
        /* the kernel is still in UP mode, so disabling the IRQs is enough */
        local_irq_save(flags);
+       kmemleak_early_log = 0;
        if (kmemleak_error) {
                local_irq_restore(flags);
                return;
index 539eeb96b323bf649f83783e0dddcb4f907e1d6e..a402f8fdc68e94888ea177104524085c9f490fd5 100644 (file)
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-               page = find_get_page(mapping, index);
+               page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
index c47dffdcb246b0cc3120d50d6dfff1a6cd0369ea..5177c6d4a2ddbf6d28ece095287c1a4a36ef9a2a 100644 (file)
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
        rcu_read_lock();
        do {
-               memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-               if (unlikely(!memcg))
+               /*
+                * Page cache insertions can happen withou an
+                * actual mm context, e.g. during disk probing
+                * on boot, loopback IO, acct() writes etc.
+                */
+               if (unlikely(!mm))
                        memcg = root_mem_cgroup;
+               else {
+                       memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                       if (unlikely(!memcg))
+                               memcg = root_mem_cgroup;
+               }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                return 0;
        }
 
-       /*
-        * Page cache insertions can happen without an actual mm
-        * context, e.g. during disk probing on boot.
-        */
-       if (unlikely(!mm))
-               memcg = root_mem_cgroup;
-       else {
-               memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-               if (!memcg)
-                       return -ENOMEM;
-       }
+       memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+       if (!memcg)
+               return -ENOMEM;
        __mem_cgroup_commit_charge(memcg, page, 1, type, false);
        return 0;
 }
index 35ef28acf137c0ab76393ede3dbc1c3d820f5c37..9ccef39a9de261c96f4e5775d7dca48b63d4d133 100644 (file)
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
-                        * Check "just unpoisoned", "filter hit", and
-                        * "race with other subpage."
+                        * Check "filter hit" and "race with other subpage."
                         */
                        lock_page(hpage);
-                       if (!PageHWPoison(hpage)
-                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                           || (p != hpage && TestSetPageHWPoison(hpage))) {
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                               return 0;
+                       if (PageHWPoison(hpage)) {
+                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                   || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       unlock_page(hpage);
+                                       return 0;
+                               }
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
index 0843feb66f3d0236abd4386b5bfd0170c24ae0ef..05f1180e9f21822e99a5f11a2a7a03af663a422c 100644 (file)
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        break;
                if (pmd_trans_huge(*old_pmd)) {
                        int err = 0;
-                       if (extent == HPAGE_PMD_SIZE)
+                       if (extent == HPAGE_PMD_SIZE) {
+                               VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+                               /* See comment in move_ptes() */
+                               if (need_rmap_locks)
+                                       anon_vma_lock_write(vma->anon_vma);
                                err = move_huge_pmd(vma, new_vma, old_addr,
                                                    new_addr, old_end,
                                                    old_pmd, new_pmd);
+                               if (need_rmap_locks)
+                                       anon_vma_unlock_write(vma->anon_vma);
+                       }
                        if (err > 0) {
                                need_flush = true;
                                continue;
index 63e24fb4387b6d305960f9e7ba8c0554e6818ca5..2ddf9a990dbd057228782a3af5ac6901a0af632b 100644 (file)
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
-               kfree(chunk);
+               pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                return NULL;
        }
 
index 175273f38cb1bd59f5aeb88cb8c815033475dfe8..44ebd5c2cd4aef0f86bd6475132cc40c501c6fef 100644 (file)
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
+       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 733ec283ed1b9e85f9181f67116052f88bb49951..019efb79708f81976bc6484cc371a8fa6c0c080e 100644 (file)
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
        }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-       int subclass = 0;
-
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-
-       return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
index b3bd4ec3fd9452f0d1f9a99dd4782260ab65c818..f04224c32005aa9a732622805915fe9aead9ee3e 100644 (file)
@@ -1545,6 +1545,8 @@ out_neigh:
        if ((orig_neigh_node) && (!is_single_hop_neigh))
                batadv_orig_node_free_ref(orig_neigh_node);
 out:
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_free_ref(router_ifinfo);
        if (router)
                batadv_neigh_node_free_ref(router);
        if (router_router)
index b25fd64d727b0d6e8227671f860b133095df5100..aa5d4946d0d784d32fdcdce217c4f2bb482502f0 100644 (file)
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
index bcc4bea632fa69ead6567e016f2f265840f7968b..f14e54a0569178e8b423b4921c4b610e4c63039c 100644 (file)
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_neigh_node *neigh_node)
 {
        struct batadv_priv *bat_priv;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
        unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned header_size = sizeof(frag_header);
        unsigned max_fragment_size, max_packet_size;
+       bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-       return true;
+       ret = true;
+
 out_err:
-       return false;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+
+       return ret;
 }
index c835e137423bb9ec70b98b5130d5a33c12f9b139..90cff585b37d5a3779cdb8f7b3b90a26eb2df88e 100644 (file)
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-       if (atomic_dec_and_test(&gw_node->refcount))
+       if (atomic_dec_and_test(&gw_node->refcount)) {
+               batadv_orig_node_free_ref(gw_node->orig_node);
                kfree_rcu(gw_node, rcu);
+       }
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               return;
+
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node)
+       if (!gw_node) {
+               batadv_orig_node_free_ref(orig_node);
                return;
+       }
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
index b851cc58085330acbab02848fedf3cb01751a060..fbda6b54baffccf798375cb8add49bb179738386 100644 (file)
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == net_dev->ifindex)
+       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
index ffd9dfbd9b0e856e35e2ac6ea594739e8feb614d..6a484514cd3e98b9e0b27a924b4dcb92f2682055 100644 (file)
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_neigh_node *router;
 
        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+       /* this is the last reference to this object */
+       router = rcu_dereference_protected(orig_ifinfo->router, true);
+       if (router)
+               batadv_neigh_node_free_ref_now(router);
        kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
        return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: orig node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+                         struct batadv_neigh_node *neigh)
+{
+       struct batadv_neigh_ifinfo *neigh_ifinfo;
+       struct batadv_hard_iface *if_outgoing;
+       struct hlist_node *node_tmp;
+
+       spin_lock_bh(&neigh->ifinfo_lock);
+
+       /* for all ifinfo objects for this neighinator */
+       hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+                                 &neigh->ifinfo_list, list) {
+               if_outgoing = neigh_ifinfo->if_outgoing;
+
+               /* always keep the default interface */
+               if (if_outgoing == BATADV_IF_DEFAULT)
+                       continue;
+
+               /* don't purge if the interface is not (going) down */
+               if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+                   (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+                   (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+                       continue;
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+                          neigh->addr, if_outgoing->net_dev->name);
+
+               hlist_del_rcu(&neigh_ifinfo->list);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+       }
+
+       spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
+               } else {
+                       /* only necessary if not the whole neighbor is to be
+                        * deleted, but some interface has been removed.
+                        */
+                       batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
                }
        }
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
-       bool changed;
+       bool changed_ifinfo, changed_neigh;
 
        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
-       changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-       changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+       changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+       changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-       if (!changed)
+       if (!changed_ifinfo && !changed_neigh)
                return false;
 
        /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-       batadv_hardif_free_ref(hard_iface);
+       if (hard_iface)
+               batadv_hardif_free_ref(hard_iface);
        return 0;
 }
 
index 80e1b0f60a30214002684a42b1bab1a02e9d9962..2acf7fa1fec6c2309123dc189ea964f66f230d07 100644 (file)
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index dac7f9b986877efa88f8e308e783c216563ad243..1948d592aa54c7a1831df546702904898cd68da4 100644 (file)
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
        return r;
 }
 
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
                     int offset, size_t size, bool more)
 {
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
        return ret;
 }
 
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+                    int offset, size_t size, bool more)
+{
+       int ret;
+       struct kvec iov;
+
+       /* sendpage cannot properly handle pages with page_count == 0,
+        * we need to fallback to sendmsg if that's the case */
+       if (page_count(page) >= 1)
+               return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+       iov.iov_base = kmap(page) + offset;
+       iov.iov_len = size;
+       ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+       kunmap(page);
+
+       return ret;
+}
 
 /*
  * Shutdown/close the socket for the given connection.
index 8b8a5a24b223ef268c28cf5e5ac5379314bba237..c547e46084d360c14abd34f97b6d1d3592d1c641 100644 (file)
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        dout("crush decode tunable chooseleaf_descend_once = %d",
             c->chooseleaf_descend_once);
 
+       ceph_decode_need(p, end, sizeof(u8), done);
+       c->chooseleaf_vary_r = ceph_decode_8(p);
+       dout("crush decode tunable chooseleaf_vary_r = %d",
+            c->chooseleaf_vary_r);
+
 done:
        dout("crush_decode success\n");
        return c;
index d2c8a06b3a9883b618c55a8cddf0929a4bcd049f..9abc503b19b7dad367b83a8179468ee3e1ae72d5 100644 (file)
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@ -4542,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+{
+       struct netdev_adjacent *upper;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+
+       return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4623,6 +4648,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchainged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+
+       return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                    lower neighbour list, RCU
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+{
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+
+       ASSERT_RTNL();
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+
+       if (type_check(dev))
+               max_nest++;
+
+       return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
index 8f8a96ef9f3f64ba519fe4c872d46c7b7c680ec9..32d872eec7f5c535221898cdb45ab8f235d0b4bb 100644 (file)
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
-       neigh->nud_state = NUD_PROBE;
-       atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+       neigh->nud_state = NUD_INCOMPLETE;
+       atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
index 81d3a9a084536541867afe9350602c0c73253006..7c8ffd97496175c60d3947019bfd138bb9746938 100644 (file)
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
index 9837bebf93cea9e9a2f909947326b83b3a3356f9..2d8d8fcfa060c51e2f6cbe240aff3d01b3f964cc 100644 (file)
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
index 1b62343f58378b3d8fc0e3ea048dbb45ce1e3a76..8383b2bddeb923bd629da7d9eac2cdd1ff1d81bd 100644 (file)
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
 
        if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
        __skb_pull(skb, offset);
 
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
index 2f737bf90b3fe4235c75ccca6640c735ecaa076b..eed34338736c275aa02bfa40448801d46dda736b 100644 (file)
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
        struct __net_random_once_work *work =
                container_of(w, struct __net_random_once_work, work);
-       if (!static_key_enabled(work->key))
-               static_key_slow_inc(work->key);
+       BUG_ON(!static_key_enabled(work->key));
+       static_key_slow_dec(work->key);
        kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-                          struct static_key *done_key)
+                          struct static_key *once_key)
 {
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
        *done = true;
        spin_unlock_irqrestore(&lock, flags);
 
-       __net_random_once_disable_jump(done_key);
+       __net_random_once_disable_jump(once_key);
 
        return true;
 }
index 0eb5d5e76dfbe1f99537e8a561f1671389c09e16..5db37cef50a9ccd80c642118f54dd4ecf04219b8 100644 (file)
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
                goto out_free;
        }
 
-       chip_index = 0;
+       chip_index = -1;
        for_each_available_child_of_node(np, child) {
+               chip_index++;
                cd = &pd->chip[chip_index];
 
                cd->mii_bus = &mdio_bus->dev;
index 8c54870db792cab059bff464d29776510ec3e5ec..6d6dd345bc4d89dea7dac7b179e4ef0f391b3954 100644 (file)
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
index 8a043f03c88ecbb418b5466953abefd54c50b1d0..b10cd43a4722730205272d7822d699bc49ea71d3 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 0d1e2cb877ec43692c5a7b4fe57e16cf921a8c97..a56b8e6e866a8c4327f86111adac947e9ffc2445 100644 (file)
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
index be8abe73bb9f464a2e68679255acde3b708ce84b..6f111e48e11c15a19ffd60d345b1399fd3c66953 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce5cbff0fc0bd0f23ac72188fd9e39fa83f..ed32313e307c43202a4710c6f5b74e14c19a4c20 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 1cbeba5edff90fa1ac891d4dd23cfb65464878a4..a52f50187b5495a1157c2ef8e0785da730414022 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath -  GSO segment length is exceeding the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
index b3f859731c60eccfdd77517989cff5ba84f6581a..2acc2337d38bfe7f35517e13952cfa8a306c24fd 100644 (file)
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
 
        dst = tnl_params->daddr;
        if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
index afcee51b90ede30a846bbd7a5c16b6996d955889..13ef00f1e17b88943ddee9ae9ba52b7d0efe7832 100644 (file)
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
        if (!tunnel)
                return -1;
 
+       mark = be32_to_cpu(tunnel->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
                return 0;
        }
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;
index 12e13bd82b5bba4fdd183d5ba2cda098a1c0c683..f40f321b41fc2e30b21019333efdc5757404fe0a 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->local_df = 1;
+       }
 
        return err;
 }
index 8210964a9f19bedf17d6f3266c1fd0775f3de144..044a0ddf6a791ace04fbb1802e64563bf3fc5518 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index db1e0da871f40a2284d67bd48c0f21d772b923f3..5e676be3daeb19e5f48df5e767a66b83ac73b432 100644 (file)
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
index 44eba052b43d3ab49ba7630bcd82e73e5b094472..5cde8f263d40c0eb0204d310fee99146dabb7a87 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
index 40e701f2e1e0324af6f0af781ac6715866ad88d3..186a8ecf92fa84bda9d0f6b050131da603c7694a 100644 (file)
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+       return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
-
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index 7f7b243e8139defccf14165971a92a7261c29b71..a2ce0101eaac846b1e36e69a5a89c804ca247c44 100644 (file)
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+       if (!head)
+               return 0;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
                        return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+       if (!head)
+               goto out;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                        return ret;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
        struct xfrm4_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 59f95affceb0773d052184bdf5fec9f276433d63..b2f091566f88453bce5eb3c33b47e1c9c040447c 100644 (file)
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        unsigned int off;
        u16 flush = 1;
        int proto;
-       __wsum csum;
 
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        NAPI_GRO_CB(skb)->flush |= flush;
 
-       csum = skb->csum;
-       skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+       skb_gro_postpull_rcsum(skb, iph, nlen);
 
        pp = ops->callbacks.gro_receive(head, skb);
 
-       skb->csum = csum;
-
 out_unlock:
        rcu_read_unlock();
 
index 40e7581374f7006c6f8c436ed686919ac93c2b19..fbf11562b54c1a7c8809f33cc969c66319517377 100644 (file)
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + local_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->local_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                unsigned int maxnonfragsize, headersize;
 
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
index b05b609f69d1cd3e58bd525cb0b5e8b11d429b80..f6a66bb4114db3af0f23ee1f950acf8b9720304d 100644 (file)
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        u8 proto;
 
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
 
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
index b7c0f827140b402685cc29049cb56646471c2cf2..6cc9f9371cc57cd6b0233815a73134dddfa6207c 100644 (file)
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!t)
                return -1;
 
+       mark = be32_to_cpu(t->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
            type != NDISC_REDIRECT)
                return 0;
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
index 09a22f4f36c9e069c6dfb3074909691ef2c82399..ca8d4ea48a5d9fa641bf129a6fc5e3b428799fa4 100644 (file)
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
        struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-       const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
        u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                        /*
                         * Change: router to host
                         */
-                       struct rt6_info *rt;
-                       rt = rt6_get_dflt_router(saddr, dev);
-                       if (rt)
-                               ip6_del_rt(rt);
+                       rt6_clean_tohost(dev_net(dev),  saddr);
                }
 
 out:
index 95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f..d38e6a8d8b9fb82ec7d583a5ab2abc652838d470 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
index 004fffb6c2218a12a9619eb08f359a8f2b391297..6ebdb7b6744cc933801dcf4c6b3dea6ddce8a89c 100644 (file)
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER          (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY      (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index 0d78132ff18aa018fa4e9918dbfb0dd57f95147a..8517d3cd1aed460bbfb1bfb0f515924f008b790d 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;
 
-       wsum = skb->csum;
+       wsum = NAPI_GRO_CB(skb)->csum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
index 19ef329bdbf8e7418fa1d352bb6c90218935831e..b930d080c66f231f338c7ea860c8c1d798d91fea 100644 (file)
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-       skb->protocol = htons(ETH_P_IPV6);
        skb->local_df = 1;
 
        return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
        struct xfrm_state *x = dst->xfrm;
        int mtu;
 
+#ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+#endif
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 6ab989c486f7ee66c5cd6235ce4f758fe9f277a6..54d13f8dbbae10670756eee0b16b898423d06060 100644 (file)
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm6_protocol *handler;
+       struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+       if (!head)
+               return 0;
 
        for_each_protocol_rcu(*proto_handlers(protocol), handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
        struct xfrm6_protocol __rcu **pprev;
        struct xfrm6_protocol *t;
        bool add_netproto = false;
-
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
        struct xfrm6_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 01e77b0ae0755d037093e7597a42db2a66378a51..8c9d7302c84682f4eec438405cf312da02cf9aab 100644 (file)
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
index 222c28b75315f1ab43226e08566a5f911c6bacc7..f169b6ee94ee8d6ee9c6daa5dc047c5f8b1d2740 100644 (file)
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
 
        unsigned long hw_start_time;
 
index dee50aefd6e868e247ba869e9e9883d4640330e3..27600a9808baeaa31be82ecb0f4218225b28665e 100644 (file)
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_lock(sdata);
 
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
+
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
index 6fb38558a5e6c79d81fc6ba4a4b03ff4313a701a..7a17decd27f91af8646da20b9ab75fc3e303e3c4 100644 (file)
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                container_of(work, struct ieee80211_roc_work, work.work);
        struct ieee80211_sub_if_data *sdata = roc->sdata;
        struct ieee80211_local *local = sdata->local;
-       bool started;
+       bool started, on_channel;
 
        mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
        if (!roc->started) {
                struct ieee80211_roc_work *dep;
 
-               /* start this ROC */
-               ieee80211_offchannel_stop_vifs(local);
+               WARN_ON(local->use_chanctx);
+
+               /* If actually operating on the desired channel (with at least
+                * 20 MHz channel width) don't stop all the operations but still
+                * treat it as though the ROC operation started properly, so
+                * other ROC operations won't interfere with this one.
+                */
+               roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-               /* switch channel etc */
+               /* start this ROC */
                ieee80211_recalc_idle(local);
 
-               local->tmp_channel = roc->chan;
-               ieee80211_hw_config(local, 0);
+               if (!roc->on_channel) {
+                       ieee80211_offchannel_stop_vifs(local);
+
+                       local->tmp_channel = roc->chan;
+                       ieee80211_hw_config(local, 0);
+               }
 
                /* tell userspace or send frame */
                ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
                list_del(&roc->list);
                started = roc->started;
+               on_channel = roc->on_channel;
                ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-               if (started) {
+               if (started && !on_channel) {
                        ieee80211_flush_queues(local, NULL);
 
                        local->tmp_channel = NULL;
index 216c45b949e513382447050eb560098a5edaa4b3..2b608b2b70ece8a6ed0cb8f8d8eed5b1d44e1536 100644 (file)
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
index 137a192e64bc3c2aa61cc9c5912a89bd3008cbe3..847d92f6bef60856be53ad13f0534695e4c6dac7 100644 (file)
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b02ab2ab79c01d58dc5ba25785993f8dd6..60cb7a665976e10e7a909a9545b7643cf34e67a4 100644 (file)
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
index a0b0aea76525c341711c129519a1c3f89a704bb9..cec5b60487a4032e2b805df374616ea9ad038669 100644 (file)
 
 #define VIF_ENTRY      __field(enum nl80211_iftype, vif_type) __field(void *, sdata)   \
                        __field(bool, p2p)                                              \
-                       __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __string(vif_name, sdata->name)
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+                       __assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
index 275c94f995f7c8401749cbbafb249bb52a418be7..3c365837e910edce60ac2348b0002361c4fca6ea 100644 (file)
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
index e9e36a256165842ac112e35e612ba79c8f86d305..9265adfdabfcf99acbdf1598067884188c500a49 100644 (file)
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* A VHT STA must support 40 MHz */
-       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return;
+       /*
+        * A VHT STA must support 40 MHz, but if we verify that here
+        * then we break a few things - some APs (e.g. Netgear R6300v2
+        * and others based on the BCM4360 chipset) will unset this
+        * capability bit when operating in 20 MHz.
+        */
 
        vht_cap->vht_supported = true;
 
index ccc46fa5edbce5e52710a22ae502e49a0f59e0a5..58579634427d2fcbf7f35556424a959697b8655e 100644 (file)
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 804105391b9a903354ae9517d602c8c4638a8879..345acfb1720b14f00aae0e5937ab07bfb90e9482 100644 (file)
@@ -66,20 +66,6 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-               struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-       struct nft_stats __percpu *stats;
-       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-       rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(chain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
-       rcu_read_unlock_bh();
-}
-
 enum nft_trace {
        NFT_TRACE_RULE,
        NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-       const struct nft_chain *chain = ops->priv;
+       const struct nft_chain *chain = ops->priv, *basechain = chain;
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       int rulenum = 0;
+       struct nft_stats __percpu *stats;
+       int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
         * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+       rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
                switch (data[NFT_REG_VERDICT].verdict) {
                case NFT_BREAK:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-                       /* fall through */
+                       continue;
                case NFT_CONTINUE:
+                       if (unlikely(pkt->skb->nf_trace))
+                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -183,37 +173,44 @@ next_rule:
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               /* fall through */
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
        case NFT_GOTO:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
        case NFT_RETURN:
                if (unlikely(pkt->skb->nf_trace))
                        nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-               /* fall through */
+               break;
        case NFT_CONTINUE:
+               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
        }
 
        if (stackptr > 0) {
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
                stackptr--;
                chain = jumpstack[stackptr].chain;
                rule  = jumpstack[stackptr].rule;
                rulenum = jumpstack[stackptr].rulenum;
                goto next_rule;
        }
-       nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
        if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(basechain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
 
-       return nft_base_chain(chain)->policy;
+       return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
 
index e009087620e30ecdae7fe1bda155c0d905d9f6c0..23ef77c60fffc60a1678f79dfbb769fe9be6a274 100644 (file)
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
index 7633a752c65e99189c3e7603e2ebdd6e7438b356..0ad080790a32a341a1ddc57d632302563964a247 100644 (file)
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
        _debug("tktlen: %x", tktlen);
        if (tktlen > AFSTOKEN_RK_TIX_MAX)
                return -EKEYREJECTED;
-       if (8 * 4 + tktlen != toklen)
+       if (toklen < 8 * 4 + tktlen)
                return -EKEYREJECTED;
 
        plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
index eed8404443d8f0145942c3e459b79934ada9c48d..f435a88d899afff7c132085ccece5427461e5490 100644 (file)
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                return err;
 
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
 
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
 
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
+
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr.res, base);
        }
 
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
 
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
 
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
 
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
index 7d09a712cb1f1353f13310f5c68b38e750d199a6..88f108edfb586ef3b2d17d15cf66b00da84f93f0 100644 (file)
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+       ASSERT_RTNL();
+
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
index acdcb4a81817b7c78e8e721ff632284b9b806fa9..3546a77033de30d0d2593c6ac4fbac8f58659025 100644 (file)
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
index fd8fa9aa7c4edd698430a9cb0647a8d26095a9a2..5b3add31f9f1202610e67e4d4868598e5846e594 100755 (executable)
@@ -25,7 +25,7 @@ cat << EOF
 #define __IGNORE_rmdir         /* unlinkat */
 #define __IGNORE_lchown                /* fchownat */
 #define __IGNORE_access                /* faccessat */
-#define __IGNORE_rename                /* renameat */
+#define __IGNORE_rename                /* renameat2 */
 #define __IGNORE_readlink      /* readlinkat */
 #define __IGNORE_symlink       /* symlinkat */
 #define __IGNORE_utimes                /* futimesat */
@@ -37,6 +37,9 @@ cat << EOF
 #define __IGNORE_lstat64       /* fstatat64 */
 #endif
 
+/* Missing flags argument */
+#define __IGNORE_renameat      /* renameat2 */
+
 /* CLOEXEC flag */
 #define __IGNORE_pipe          /* pipe2 */
 #define __IGNORE_dup2          /* dup3 */
index 8365909f5f8cfc6f404fe5ce21b936884298d13f..9134dbf70d3ee6898664f895905c8452e89a01c3 100644 (file)
@@ -306,57 +306,138 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
 }
 
 /**
- * may_access - verifies if a new exception is part of what is allowed
- *             by a dev cgroup based on the default policy +
- *             exceptions. This is used to make sure a child cgroup
- *             won't have more privileges than its parent or to
- *             verify if a certain access is allowed.
- * @dev_cgroup: dev cgroup to be tested against
- * @refex: new exception
- * @behavior: behavior of the exception
+ * match_exception     - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
  */
-static bool may_access(struct dev_cgroup *dev_cgroup,
-                      struct dev_exception_item *refex,
-                      enum devcg_behavior behavior)
+static bool match_exception(struct list_head *exceptions, short type,
+                           u32 major, u32 minor, short access)
 {
        struct dev_exception_item *ex;
-       bool match = false;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&devcgroup_mutex),
-                          "device_cgroup::may_access() called without proper synchronization");
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+                       continue;
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+                       continue;
+               if (ex->major != ~0 && ex->major != major)
+                       continue;
+               if (ex->minor != ~0 && ex->minor != minor)
+                       continue;
+               /* provided access cannot have more than the exception rule */
+               if (access & (~ex->access))
+                       continue;
+               return true;
+       }
+       return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any of the exception list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+                                   u32 major, u32 minor, short access)
+{
+       struct dev_exception_item *ex;
 
-       list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
-               if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
-               if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
                        continue;
-               if (ex->major != ~0 && ex->major != refex->major)
+               /*
+                * We must be sure that both the exception and the provided
+                * range aren't masking all devices
+                */
+               if (ex->major != ~0 && major != ~0 && ex->major != major)
                        continue;
-               if (ex->minor != ~0 && ex->minor != refex->minor)
+               if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
                        continue;
-               if (refex->access & (~ex->access))
+               /*
+                * In order to make sure the provided range isn't matching
+                * an exception, all its access bits shouldn't match the
+                * exception's access bits
+                */
+               if (!(access & ex->access))
                        continue;
-               match = true;
-               break;
+               return true;
        }
+       return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+                         struct dev_exception_item *refex,
+                         enum devcg_behavior behavior)
+{
+       bool match = false;
+
+       rcu_lockdep_assert(rcu_read_lock_held() ||
+                          lockdep_is_held(&devcgroup_mutex),
+                          "device_cgroup:verify_new_ex called without proper synchronization");
 
        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {
-                       /* the exception will deny access to certain devices */
+                       /*
+                        * new exception in the child doesn't matter, only
+                        * adding extra restrictions
+                        */ 
                        return true;
                } else {
-                       /* the exception will allow access to certain devices */
+                       /*
+                        * new exception in the child will add more devices
+                        * that can be accessed, so it can't match any of
+                        * parent's exceptions, even slightly
+                        */ 
+                       match = match_exception_partial(&dev_cgroup->exceptions,
+                                                       refex->type,
+                                                       refex->major,
+                                                       refex->minor,
+                                                       refex->access);
+
                        if (match)
-                               /*
-                                * a new exception allowing access shouldn't
-                                * match an parent's exception
-                                */
                                return false;
                        return true;
                }
        } else {
-               /* only behavior == DEVCG_DEFAULT_DENY allowed here */
+               /*
+                * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+                * the new exception will add access to more devices and must
+                * be contained completely in a parent's exception to be
+                * allowed
+                */
+               match = match_exception(&dev_cgroup->exceptions, refex->type,
+                                       refex->major, refex->minor,
+                                       refex->access);
+
                if (match)
                        /* parent has an exception that matches the proposed */
                        return true;
@@ -378,7 +459,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 
        if (!parent)
                return 1;
-       return may_access(parent, ex, childcg->behavior);
+       return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+                                 struct dev_exception_item *ex)
+{
+       struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
+
+       if (!parent)
+               return true;
+
+       /* It's always allowed to remove access to devices */
+       if (childcg->behavior == DEVCG_DEFAULT_DENY)
+               return true;
+
+       /*
+        * Make sure you're not removing part or a whole exception existing in
+        * the parent cgroup
+        */
+       return !match_exception_partial(&parent->exceptions, ex->type,
+                                       ex->major, ex->minor, ex->access);
 }
 
 /**
@@ -616,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 
        switch (filetype) {
        case DEVCG_ALLOW:
-               if (!parent_has_perm(devcgroup, &ex))
-                       return -EPERM;
                /*
                 * If the default policy is to allow by default, try to remove
                 * an matching exception instead. And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+                       /* Check if the parent allows removing it first */
+                       if (!parent_allows_removal(devcgroup, &ex))
+                               return -EPERM;
                        dev_exception_rm(devcgroup, &ex);
-                       return 0;
+                       break;
                }
+
+               if (!parent_has_perm(devcgroup, &ex))
+                       return -EPERM;
                rc = dev_exception_add(devcgroup, &ex);
                break;
        case DEVCG_DENY:
@@ -704,18 +820,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
 {
        struct dev_cgroup *dev_cgroup;
-       struct dev_exception_item ex;
-       int rc;
-
-       memset(&ex, 0, sizeof(ex));
-       ex.type = type;
-       ex.major = major;
-       ex.minor = minor;
-       ex.access = access;
+       bool rc;
 
        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
-       rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
+       if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+               /* Can't match any of the exceptions, even partially */
+               rc = !match_exception_partial(&dev_cgroup->exceptions,
+                                             type, major, minor, access);
+       else
+               /* Need to match completely one exception to be allowed */
+               rc = match_exception(&dev_cgroup->exceptions, type, major,
+                                    minor, access);
        rcu_read_unlock();
 
        if (!rc)
index 94d08733cb388cdda361e509701da59650714eeb..76cbb9ec953a0da66b403aa41687f2af93c0f690 100644 (file)
@@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
        struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+       struct snd_pcm_runtime *runtime = substream->runtime;
        int ret;
 
        switch (cmd) {
@@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                dmaengine_resume(prtd->dma_chan);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+                       dmaengine_pause(prtd->dma_chan);
+               else
+                       dmaengine_terminate_all(prtd->dma_chan);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                dmaengine_pause(prtd->dma_chan);
                break;
index 6496822c1808b53c172d56df4382fb88e9e20e95..1ff78ec9f0ac508734d536afbe187b9039127e14 100644 (file)
@@ -818,12 +818,14 @@ int snd_sbmixer_new(struct snd_sb *chip)
                        return err;
                break;
        case SB_HW_DT019X:
-               if ((err = snd_sbmixer_init(chip,
-                                           snd_dt019x_controls,
-                                           ARRAY_SIZE(snd_dt019x_controls),
-                                           snd_dt019x_init_values,
-                                           ARRAY_SIZE(snd_dt019x_init_values),
-                                           "DT019X")) < 0)
+               err = snd_sbmixer_init(chip,
+                                      snd_dt019x_controls,
+                                      ARRAY_SIZE(snd_dt019x_controls),
+                                      snd_dt019x_init_values,
+                                      ARRAY_SIZE(snd_dt019x_init_values),
+                                      "DT019X");
+               if (err < 0)
+                       return err;
                break;
        default:
                strcpy(card->mixername, "???");
index b540ad71eb0d733ab217550a66ac40eb35e22da6..6cc3cf2855586901c45962535fec65718ad3139f 100644 (file)
@@ -1367,6 +1367,12 @@ static int azx_first_init(struct azx *chip)
        /* initialize streams */
        azx_init_stream(chip);
 
+       /* workaround for Broadwell HDMI: the first stream is broken,
+        * so mask it by keeping it as if opened
+        */
+       if (pci->vendor == 0x8086 && pci->device == 0x160c)
+               chip->azx_dev[0].opened = 1;
+
        /* initialize chip */
        azx_init_pci(chip);
        azx_init_chip(chip, (probe_only[dev] & 2) == 0);
@@ -1737,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* 9 Series */
+       { PCI_DEVICE(0x8086, 0x8ca0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Wellsburg */
        { PCI_DEVICE(0x8086, 0x8d20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
index 0cb5b89cd0c8b3e81dd57a0bcdaa471d67d018c1..b4218a19df22209e227538b0056a79cbe1c21279 100644 (file)
@@ -1127,8 +1127,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                                            AMP_OUT_UNMUTE);
 
        eld = &per_pin->sink_eld;
-       if (!eld->monitor_present)
+       if (!eld->monitor_present) {
+               hdmi_set_channel_count(codec, per_pin->cvt_nid, channels);
                return;
+       }
 
        if (!non_pcm && per_pin->chmap_set)
                ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
@@ -3330,6 +3332,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3385,6 +3388,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index c1952c9103398953ba0e05078f2f1a21763c8e90..49e884fb3e5db064426758b8c8612a42a1633830 100644 (file)
@@ -4616,13 +4616,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index fa158cfe9b32d396d09bdb266727c57203c7e62d..d1929de641e24eb4ec733820f8b7dc717d542d33 100644 (file)
@@ -376,7 +376,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
                reg = AIC31XX_ADCFLAG;
                break;
        default:
-               dev_err(w->codec->dev, "Unknown widget '%s' calling %s/n",
+               dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n",
                        w->name, __func__);
                return -EINVAL;
        }
index 5522d2566c6742d5ee19f91b4358b30f51276b62..ecd26dd2e442fb2e4019eca0238fdb9edba4a73e 100644 (file)
@@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
        { 40, 0x0000 },   /* R40    - SPKOUTL volume */
        { 41, 0x0000 },   /* R41    - SPKOUTR volume */
 
+       { 49, 0x0010 },   /* R49    - Class D Control 1 */
        { 51, 0x0003 },   /* R51    - Class D Control 2 */
 
        { 56, 0x0506 },   /* R56    - Clocking 4 */
@@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
        case WM8962_ALC2:
        case WM8962_THERMAL_SHUTDOWN_STATUS:
        case WM8962_ADDITIONAL_CONTROL_4:
-       case WM8962_CLASS_D_CONTROL_1:
        case WM8962_DC_SERVO_6:
        case WM8962_INTERRUPT_STATUS_1:
        case WM8962_INTERRUPT_STATUS_2:
@@ -2929,13 +2929,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 static int wm8962_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
-       int val;
+       int val, ret;
 
        if (mute)
-               val = WM8962_DAC_MUTE;
+               val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
        else
                val = 0;
 
+       /**
+        * The DAC mute bit is mirrored in two registers, update both to keep
+        * the register cache consistent.
+        */
+       ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
+                                 WM8962_DAC_MUTE_ALT, val);
+       if (ret < 0)
+               return ret;
+
        return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
                                   WM8962_DAC_MUTE, val);
 }
index a1a5d5294c19dea3d76ce03be2dfe490d925bd78..910aafd09d21e210d2b6e0b36c7e3243533d5e6b 100644 (file)
 #define WM8962_SPKOUTL_ENA_MASK                 0x0040  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_SHIFT                     6  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_WIDTH                     1  /* SPKOUTL_ENA */
+#define WM8962_DAC_MUTE_ALT                     0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_MASK                0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_SHIFT                    4  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_WIDTH                    1  /* DAC_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE                 0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_MASK            0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_SHIFT                1  /* SPKOUTL_PGA_MUTE */
index c8e5db1414d7e75f4077728765a210877f4b3cd9..496ce2eb2f1f31f4c4f0b6776ffd4d9a60243992 100644 (file)
@@ -258,10 +258,16 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
                return -EINVAL;
        }
 
-       if (ratio == 1) {
+       /* Only EXTAL source can be output directly without using PSR and PM */
+       if (ratio == 1 && clksrc == esai_priv->extalclk) {
                /* Bypass all the dividers if not being needed */
                ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO;
                goto out;
+       } else if (ratio < 2) {
+               /* The ratio should be no less than 2 if using other sources */
+               dev_err(dai->dev, "failed to derive required HCK%c rate\n",
+                               tx ? 'T' : 'R');
+               return -EINVAL;
        }
 
        ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0);
@@ -307,7 +313,8 @@ static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
                return -EINVAL;
        }
 
-       if (esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
+       /* The ratio should be contented by FP alone if bypassing PM and PSR */
+       if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
                dev_err(dai->dev, "the ratio is out of range (1 ~ 16)\n");
                return -EINVAL;
        }
@@ -454,12 +461,6 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
        }
 
        if (!dai->active) {
-               /* Reset Port C */
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
-                                  ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
-                                  ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
-
                /* Set synchronous mode */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -519,6 +520,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
 
        regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
 
+       /* Remove ESAI personal reset by configuring ESAI_PCRC and ESAI_PRRC */
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
+                          ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
+                          ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
        return 0;
 }
 
index ac869931d7f16c9c4049aefaffb4a7d416417e49..267717aa96c14e971329cfe2f728d1f141de72c3 100644 (file)
@@ -145,7 +145,7 @@ static const struct file_operations audmux_debugfs_fops = {
        .llseek = default_llseek,
 };
 
-static void __init audmux_debugfs_init(void)
+static void audmux_debugfs_init(void)
 {
        int i;
        char buf[20];
index 5d06eecb61986da272fcda4858db56c91d285153..18aee77f8d4a55276194542c41464feb363784d3 100644 (file)
@@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
 
        sst_pdata = &sst_acpi->sst_pdata;
        sst_pdata->id = desc->sst_id;
+       sst_pdata->dma_dev = dev;
        sst_acpi->desc = desc;
        sst_acpi->mach = mach;
 
index a50bf7fc0e3abf8dc729e7ce77f75ef40848480e..adf0aca5aca60423d2643a81679ce291f44d1208 100644 (file)
@@ -324,7 +324,7 @@ static int sst_byt_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        memcpy_toio(sst->addr.lpe + SST_BYT_MAILBOX_OFFSET,
               &pdata->fw_base, sizeof(u32));
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(sst->dma_dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
 
index d0eaeee21be4c634ae88e984cf964c5067b5cae6..0d31dbbf480652e773243dca639f042a4c531bc0 100644 (file)
@@ -542,16 +542,20 @@ struct sst_byt_stream *sst_byt_stream_new(struct sst_byt *byt, int id,
        void *data)
 {
        struct sst_byt_stream *stream;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &byt->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
        stream->byt = byt;
        stream->str_id = id;
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -630,6 +634,8 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 {
        u64 header;
        int ret = 0;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        if (!stream->commited)
                goto out;
@@ -644,8 +650,10 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 
        stream->commited = false;
 out:
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
index 30ca14a6a83595d5acf4e644d7f8d50827a99a98..401213455497258111dcf939c7d963fbbbeb3e37 100644 (file)
@@ -228,6 +228,7 @@ struct sst_dsp {
        spinlock_t spinlock;    /* IPC locking */
        struct mutex mutex;     /* DSP FW lock */
        struct device *dev;
+       struct device *dma_dev;
        void *thread_context;
        int irq;
        u32 id;
index 0c129fd85ecf8a37b3758dfc924a9e5091c9e01e..0b715b20a2d7d46b9f06189de1ee57d9aacc28ab 100644 (file)
@@ -337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
        spin_lock_init(&sst->spinlock);
        mutex_init(&sst->mutex);
        sst->dev = dev;
+       sst->dma_dev = pdata->dma_dev;
        sst->thread_context = sst_dev->thread_context;
        sst->sst_dev = sst_dev;
        sst->id = pdata->id;
index 74052b59485ca1ce942444d0a3871ed8a6051672..e44423be66c459ba721249d0efe21bccdaeea4b5 100644 (file)
@@ -169,6 +169,7 @@ struct sst_pdata {
        u32 dma_base;
        u32 dma_size;
        int dma_engine;
+       struct device *dma_dev;
 
        /* DSP */
        u32 id;
index f7687107cf7f51f19a95992b79819270e3dd4734..928f228c38e754db3f41551208493045ce9ee561 100644 (file)
@@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
        sst_fw->private = private;
        sst_fw->size = fw->size;
 
-       err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
-       if (err < 0) {
-               kfree(sst_fw);
-               return NULL;
-       }
-
        /* allocate DMA buffer to store FW data */
-       sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
+       sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
                                &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
        if (!sst_fw->dma_buf) {
                dev_err(dsp->dev, "error: DMA alloc failed\n");
@@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw)
        list_del(&sst_fw->list);
        mutex_unlock(&dsp->mutex);
 
-       dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+       dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
        kfree(sst_fw);
 }
@@ -202,6 +196,9 @@ static int block_alloc_contiguous(struct sst_module *module,
                size -= block->size;
        }
 
+       list_for_each_entry(block, &tmp, list)
+               list_add(&block->module_list, &module->block_list);
+
        list_splice(&tmp, &dsp->used_block_list);
        return 0;
 }
@@ -247,8 +244,7 @@ static int block_alloc(struct sst_module *module,
                /* do we span > 1 blocks */
                if (data->size > block->size) {
                        ret = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (ret == 0)
                                return ret;
                }
@@ -344,7 +340,7 @@ static int block_alloc_fixed(struct sst_module *module,
 
                        err = block_alloc_contiguous(module, data,
                                block->offset + block->size,
-                               data->size - block->size + data->offset - block->offset);
+                               data->size - block->size);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -371,15 +367,10 @@ static int block_alloc_fixed(struct sst_module *module,
                if (data->offset >= block->offset && data->offset < block_end) {
 
                        err = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (err < 0)
                                return -ENOMEM;
 
-                       /* add block */
-                       block->data_type = data->data_type;
-                       list_move(&block->list, &dsp->used_block_list);
-                       list_add(&block->module_list, &module->block_list);
                        return 0;
                }
 
index f5ebf36af8898d8a94d4a46886bfcfe0964d0567..535f517629fd608fb7c4bd3eb79a05a67d98a518 100644 (file)
@@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        int ret = -ENODEV, i, j, region_count;
        u32 offset, size;
 
-       dev = sst->dev;
+       dev = sst->dma_dev;
 
        switch (sst->id) {
        case SST_DEV_ID_LYNX_POINT:
@@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
                return ret;
        }
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;
 
index 50e4246d4b57a2df8c9ad0c54deabb4e346ca572..e7996b39a48480a8577ac26d6cad28e3392259aa 100644 (file)
@@ -1159,11 +1159,14 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
        void *data)
 {
        struct sst_hsw_stream *stream;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &hsw->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
@@ -1172,6 +1175,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
 
        /* work to process notification messages */
        INIT_WORK(&stream->notify_work, hsw_notification_work);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -1180,6 +1184,8 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        u32 header;
        int ret = 0;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        /* dont free DSP streams that are not commited */
        if (!stream->commited)
@@ -1201,8 +1207,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        trace_hsw_stream_free_req(stream, &stream->free_req);
 
 out:
+       cancel_work_sync(&stream->notify_work);
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
@@ -1538,10 +1547,28 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 }
 
 /* Stream pointer positions */
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream)
 {
-       return stream->rpos.position;
+       u32 rpos;
+
+       sst_dsp_read(hsw->dsp, &rpos,
+               stream->reply.read_position_register_address, sizeof(rpos));
+
+       return rpos;
+}
+
+/* Stream presentation (monotonic) positions */
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream)
+{
+       u64 ppos;
+
+       sst_dsp_read(hsw->dsp, &ppos,
+               stream->reply.presentation_position_register_address,
+               sizeof(ppos));
+
+       return ppos;
 }
 
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
index d517929ccc389e2aaca3321106b6088250929482..2ac194a6d04b226eb86cb42c2361c643f7400635 100644 (file)
@@ -464,7 +464,9 @@ int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 *position);
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 stage_id, u32 position);
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream);
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream);
 
 /* HW port config */
index 0a32dd13a23d28ab96282b75592c20d5b133ee9f..9d5f64a583a388bd501973c32ae3d5d086d6acc6 100644 (file)
@@ -99,6 +99,7 @@ struct hsw_pcm_data {
        struct snd_compr_stream *cstream;
        unsigned int wpos;
        struct mutex mutex;
+       bool allocated;
 };
 
 /* private data for the driver */
@@ -107,12 +108,14 @@ struct hsw_priv_data {
        struct sst_hsw *hsw;
 
        /* page tables */
-       unsigned char *pcm_pg[HSW_PCM_COUNT][2];
+       struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
 
        /* DAI data */
        struct hsw_pcm_data pcm[HSW_PCM_COUNT];
 };
 
+static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
+
 static inline u32 hsw_mixer_to_ipc(unsigned int value)
 {
        if (value >= ARRAY_SIZE(volume_map))
@@ -273,28 +276,26 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
 };
 
 /* Create DMA buffer page table for DSP */
-static int create_adsp_page_table(struct hsw_priv_data *pdata,
-       struct snd_soc_pcm_runtime *rtd,
-       unsigned char *dma_area, size_t size, int pcm, int stream)
+static int create_adsp_page_table(struct snd_pcm_substream *substream,
+       struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd,
+       unsigned char *dma_area, size_t size, int pcm)
 {
-       int i, pages;
+       struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+       int i, pages, stream = substream->stream;
 
-       if (size % PAGE_SIZE)
-               pages = (size / PAGE_SIZE) + 1;
-       else
-               pages = size / PAGE_SIZE;
+       pages = snd_sgbuf_aligned_pages(size);
 
        dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n",
                dma_area, size, pages);
 
        for (i = 0; i < pages; i++) {
                u32 idx = (((i << 2) + i)) >> 1;
-               u32 pfn = (virt_to_phys(dma_area + i * PAGE_SIZE)) >> PAGE_SHIFT;
+               u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
                u32 *pg_table;
 
                dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
 
-               pg_table = (u32*)(pdata->pcm_pg[pcm][stream] + idx);
+               pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx);
 
                if (i & 1)
                        *pg_table |= (pfn << 4);
@@ -317,12 +318,36 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
        struct sst_hsw *hsw = pdata->hsw;
        struct sst_module *module_data;
        struct sst_dsp *dsp;
+       struct snd_dma_buffer *dmab;
        enum sst_hsw_stream_type stream_type;
        enum sst_hsw_stream_path_id path_id;
        u32 rate, bits, map, pages, module_id;
        u8 channels;
        int ret;
 
+       /* check if we are being called a subsequent time */
+       if (pcm_data->allocated) {
+               ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
+               if (ret < 0)
+                       dev_dbg(rtd->dev, "error: reset stream failed %d\n",
+                               ret);
+
+               ret = sst_hsw_stream_free(hsw, pcm_data->stream);
+               if (ret < 0) {
+                       dev_dbg(rtd->dev, "error: free stream failed %d\n",
+                               ret);
+                       return ret;
+               }
+               pcm_data->allocated = false;
+
+               pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+                       hsw_notify_pointer, pcm_data);
+               if (pcm_data->stream == NULL) {
+                       dev_err(rtd->dev, "error: failed to create stream\n");
+                       return -EINVAL;
+               }
+       }
+
        /* stream direction */
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
@@ -416,8 +441,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       ret = create_adsp_page_table(pdata, rtd, runtime->dma_area,
-               runtime->dma_bytes, rtd->cpu_dai->id, substream->stream);
+       dmab = snd_pcm_get_dma_buf(substream);
+
+       ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
+               runtime->dma_bytes, rtd->cpu_dai->id);
        if (ret < 0)
                return ret;
 
@@ -430,9 +457,9 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                pages = runtime->dma_bytes / PAGE_SIZE;
 
        ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
-               virt_to_phys(pdata->pcm_pg[rtd->cpu_dai->id][substream->stream]),
+               pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
                pages, runtime->dma_bytes, 0,
-               (u32)(virt_to_phys(runtime->dma_area) >> PAGE_SHIFT));
+               snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
        if (ret < 0) {
                dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret);
                return ret;
@@ -474,6 +501,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
                return ret;
        }
+       pcm_data->allocated = true;
 
        ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
        if (ret < 0)
@@ -541,12 +569,14 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
        struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
        struct sst_hsw *hsw = pdata->hsw;
        snd_pcm_uframes_t offset;
+       uint64_t ppos;
+       u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
 
-       offset = bytes_to_frames(runtime,
-               sst_hsw_get_dsp_position(hsw, pcm_data->stream));
+       offset = bytes_to_frames(runtime, position);
+       ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
 
-       dev_dbg(rtd->dev, "PCM: DMA pointer %zu bytes\n",
-               frames_to_bytes(runtime, (u32)offset));
+       dev_dbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
+               position, ppos);
        return offset;
 }
 
@@ -606,6 +636,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
                dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
                goto out;
        }
+       pcm_data->allocated = 0;
        pcm_data->stream = NULL;
 
 out:
@@ -621,7 +652,7 @@ static struct snd_pcm_ops hsw_pcm_ops = {
        .hw_free        = hsw_pcm_hw_free,
        .trigger        = hsw_pcm_trigger,
        .pointer        = hsw_pcm_pointer,
-       .mmap           = snd_pcm_lib_default_mmap,
+       .page           = snd_pcm_sgbuf_ops_page,
 };
 
 static void hsw_pcm_free(struct snd_pcm *pcm)
@@ -632,17 +663,16 @@ static void hsw_pcm_free(struct snd_pcm *pcm)
 static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_pcm *pcm = rtd->pcm;
+       struct snd_soc_platform *platform = rtd->platform;
+       struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+       struct device *dev = pdata->dma_dev;
        int ret = 0;
 
-       ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32));
-       if (ret)
-               return ret;
-
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
                        pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
                ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
-                       SNDRV_DMA_TYPE_DEV,
-                       rtd->card->dev,
+                       SNDRV_DMA_TYPE_DEV_SG,
+                       dev,
                        hsw_pcm_hardware.buffer_bytes_max,
                        hsw_pcm_hardware.buffer_bytes_max);
                if (ret) {
@@ -742,11 +772,14 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 {
        struct sst_pdata *pdata = dev_get_platdata(platform->dev);
        struct hsw_priv_data *priv_data;
-       int i;
+       struct device *dma_dev;
+       int i, ret = 0;
 
        if (!pdata)
                return -ENODEV;
 
+       dma_dev = pdata->dma_dev;
+
        priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL);
        priv_data->hsw = pdata->dsp;
        snd_soc_platform_set_drvdata(platform, priv_data);
@@ -758,15 +791,17 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 
                /* playback */
                if (hsw_dais[i].playback.channels_min) {
-                       priv_data->pcm_pg[i][0] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][0] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][0]);
+                       if (ret < 0)
                                goto err;
                }
 
                /* capture */
                if (hsw_dais[i].capture.channels_min) {
-                       priv_data->pcm_pg[i][1] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][1] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][1]);
+                       if (ret < 0)
                                goto err;
                }
        }
@@ -776,11 +811,11 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 err:
        for (;i >= 0; i--) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
-       return -ENOMEM;
+       return ret;
 }
 
 static int hsw_pcm_remove(struct snd_soc_platform *platform)
@@ -791,9 +826,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform)
 
        for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
 
        return 0;
index 215b668166be6c50d01963cac2ef62598cf6f7b2..89424470a1f3860eab989eabeab579184e070359 100644 (file)
@@ -197,13 +197,12 @@ static void rsnd_dma_complete(void *data)
         * rsnd_dai_pointer_update() will be called twice,
         * ant it will breaks io->byte_pos
         */
-
-       rsnd_dai_pointer_update(io, io->byte_per_period);
-
        if (dma->submit_loop)
                rsnd_dma_continue(dma);
 
        rsnd_unlock(priv, flags);
+
+       rsnd_dai_pointer_update(io, io->byte_per_period);
 }
 
 static void __rsnd_dma_start(struct rsnd_dma *dma)
index 7769b0a2bc5a5287f932d20524216f7c390fd4c7..6d6ceee447d559e711ba8eccc2e734953585d9ad 100644 (file)
@@ -1612,8 +1612,11 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
                                "ASoC: Failed to turn on bias: %d\n", ret);
        }
 
-       /* Prepare for a STADDBY->ON or ON->STANDBY transition */
-       if (d->bias_level != d->target_bias_level) {
+       /* Prepare for a transition to ON or away from ON */
+       if ((d->target_bias_level == SND_SOC_BIAS_ON &&
+            d->bias_level != SND_SOC_BIAS_ON) ||
+           (d->target_bias_level != SND_SOC_BIAS_ON &&
+            d->bias_level == SND_SOC_BIAS_ON)) {
                ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
                if (ret != 0)
                        dev_err(d->dev,
@@ -3475,8 +3478,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
                cpu_dai = rtd->cpu_dai;
                codec_dai = rtd->codec_dai;
 
-               /* dynamic FE links have no fixed DAI mapping */
-               if (rtd->dai_link->dynamic)
+               /*
+                * dynamic FE links have no fixed DAI mapping.
+                * CODEC<->CODEC links have no direct connection.
+                */
+               if (rtd->dai_link->dynamic || rtd->dai_link->params)
                        continue;
 
                /* there is no point in connecting BE DAI links with dummies */
index 2cedf09f6d9613c7b34bb22888806fde598052c6..a391de05803765403fc94e989717ad7dae91db5c 100644 (file)
@@ -1675,7 +1675,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
                        be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
                        break;
                case SNDRV_PCM_TRIGGER_SUSPEND:
-                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)
+                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                continue;
 
                        if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
index 893d5a1afc3ce6bf854a61ef0c3f4f54cc552d0d..c3b5b7dca1c3a8fd4b771512c841bb32e27b5554 100644 (file)
@@ -651,7 +651,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing)
+       if (chip->probing && chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
@@ -663,7 +663,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
        down_read(&chip->shutdown_rwsem);
-       if (!chip->shutdown && !chip->probing)
+       if (!chip->shutdown && !chip->probing && !chip->in_pm)
                usb_autopm_put_interface(chip->pm_intf);
        up_read(&chip->shutdown_rwsem);
 }
@@ -695,8 +695,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
                        chip->autosuspended = 1;
        }
 
-       list_for_each_entry(mixer, &chip->mixer_list, list)
-               snd_usb_mixer_suspend(mixer);
+       if (chip->num_suspended_intf == 1)
+               list_for_each_entry(mixer, &chip->mixer_list, list)
+                       snd_usb_mixer_suspend(mixer);
 
        return 0;
 }
@@ -711,6 +712,8 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
                return 0;
        if (--chip->num_suspended_intf)
                return 0;
+
+       chip->in_pm = 1;
        /*
         * ALSA leaves material resumption to user space
         * we just notify and restart the mixers
@@ -726,6 +729,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
        chip->autosuspended = 0;
 
 err_out:
+       chip->in_pm = 0;
        return err;
 }
 
index 9867ab866857260df9432b4378d5ba87c1d90834..97acb906acc27041cebb340abaa9d69c36c34634 100644 (file)
@@ -92,6 +92,7 @@ struct snd_usb_endpoint {
        unsigned int curframesize;      /* current packet size in frames (for capture) */
        unsigned int syncmaxsize;       /* sync endpoint packet size */
        unsigned int fill_max:1;        /* fill max packet size always */
+       unsigned int udh01_fb_quirk:1;  /* corrupted feedback data */
        unsigned int datainterval;      /* log_2 of data packet interval */
        unsigned int syncinterval;      /* P for adaptive mode, 0 otherwise */
        unsigned char silence_value;
index e70a87e0d9fe6402765afa0741b9f974b1259bec..289f582c91303cd6bd26d194124b36702f8e4915 100644 (file)
@@ -471,6 +471,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                        ep->syncinterval = 3;
 
                ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
+
+               if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
+                   ep->syncmaxsize == 4)
+                       ep->udh01_fb_quirk = 1;
        }
 
        list_add_tail(&ep->list, &chip->ep_list);
@@ -1105,7 +1109,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
        if (f == 0)
                return;
 
-       if (unlikely(ep->freqshift == INT_MIN)) {
+       if (unlikely(sender->udh01_fb_quirk)) {
+               /*
+                * The TEAC UD-H01 firmware sometimes changes the feedback value
+                * by +/- 0x1.0000.
+                */
+               if (f < ep->freqn - 0x8000)
+                       f += 0x10000;
+               else if (f > ep->freqn + 0x8000)
+                       f -= 0x10000;
+       } else if (unlikely(ep->freqshift == INT_MIN)) {
                /*
                 * The first time we see a feedback value, determine its format
                 * by shifting it left or right until it matches the nominal
index 131336d40492786b2283df01577a1957b0556a66..c62a1659106d2c3da63152d59e1302e2eee47d6d 100644 (file)
@@ -1501,9 +1501,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
         * The error should be lower than 2ms since the estimate relies
         * on two reads of a counter updated every ms.
         */
-       if (printk_ratelimit() &&
-           abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
-               dev_dbg(&subs->dev->dev,
+       if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+               dev_dbg_ratelimited(&subs->dev->dev,
                        "delay: estimated %d, actual %d\n",
                        est_delay, subs->last_delay);
 
index 25c4c7e217de603c1f02c5c714833e6f29d92c3b..91d0380431b4f79f1e209cd973b7f44152caa494 100644 (file)
@@ -40,6 +40,7 @@ struct snd_usb_audio {
        struct rw_semaphore shutdown_rwsem;
        unsigned int shutdown:1;
        unsigned int probing:1;
+       unsigned int in_pm:1;
        unsigned int autosuspended:1;   
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        
index bcae806b0c398d5450975ab08001e7a808e376c4..9a617adc6675dc06552de428c93b3c611599900b 100644 (file)
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
        $(call descend,$@)
 
+liblockdep: FORCE
+       $(call descend,lib/lockdep)
+
 libapikfs: FORCE
        $(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
        $(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+       $(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
        $(call descend,lib/api,clean)
 
index cb09d3ff8f5856dab489130133ec9c2e2ba73db8..bba2f5253b6e281ff85abdf65a76406d88d6503b 100644 (file)
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+       $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile