git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'pinctrl-v4.6-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Apr 2016 18:52:49 +0000 (11:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Apr 2016 18:52:49 +0000 (11:52 -0700)
Pull pin control fixes from Linus Walleij:
 "Some pin control driver fixes came in.  One headed for stable and the
  other two are just ordinary merge window fixes.

   - Make the i.MX driver select REGMAP as a dependency
   - Fix up the Mediatek debounce time unit
   - Fix a real hairy ffs vs __ffs issue in the Single pinctrl driver"

* tag 'pinctrl-v4.6-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl:
  pinctrl: single: Fix pcs_parse_bits_in_pinctrl_entry to use __ffs than ffs
  pinctrl: mediatek: correct debounce time unit in mtk_gpio_set_debounce
  pinctrl: imx: Kconfig: PINCTRL_IMX select REGMAP
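
The pinctrl-single fix above turns on the difference between ffs() and __ffs(): ffs() returns a 1-based bit position (and 0 when no bit is set), while __ffs() returns a 0-based bit index, so using ffs() where a shift count is needed moves a bit field over by one position. A minimal user-space sketch of that off-by-one (an illustration only, not the actual patch; my___ffs() stands in for the kernel's __ffs()):

    #include <stdio.h>
    #include <strings.h>    /* ffs(): 1-based index of the lowest set bit */

    /* Stand-in for the kernel's __ffs(): 0-based index, undefined for 0. */
    static unsigned int my___ffs(unsigned int word)
    {
            return (unsigned int)(ffs((int)word) - 1);
    }

    int main(void)
    {
            unsigned int mask = 0x00f0;     /* a 4-bit field at bits 4..7 */
            unsigned int val  = 0x3;

            printf("ffs(mask)   = %d (1-based)\n", ffs((int)mask));
            printf("__ffs(mask) = %u (0-based)\n", my___ffs(mask));

            /* A shift count must be the 0-based index: */
            printf("val << __ffs(mask) = 0x%x (lands in the field)\n",
                   val << my___ffs(mask));
            printf("val << ffs(mask)   = 0x%x (one bit too far)\n",
                   val << ffs((int)mask));
            return 0;
    }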

322 files changed:
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
Documentation/devicetree/bindings/rtc/s3c-rtc.txt
Documentation/kernel-parameters.txt
Documentation/usb/gadget_multi.txt
Documentation/x86/protection-keys.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/include/asm/fb.h [new file with mode: 0644]
arch/arm/boot/dts/am335x-baltos-ir5221.dts
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/meson8.dtsi
arch/arm/boot/dts/meson8b.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/cputype.h
arch/arm/kernel/setup.c
arch/arm/kvm/arm.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
arch/arm/mach-omap2/soc.h
arch/arm/mach-pxa/devices.c
arch/arm/mach-sa1100/Kconfig
arch/arm/mach-uniphier/platsmp.c
arch/arm/mm/dma-mapping.c
arch/arm64/boot/dts/broadcom/vulcan.dtsi
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/head.S
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp/s2-setup.c
arch/m68k/coldfire/gpio.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/parisc/Kconfig
arch/parisc/Kconfig.debug
arch/parisc/Makefile
arch/parisc/include/asm/ftrace.h
arch/parisc/kernel/Makefile
arch/parisc/kernel/entry.S
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/head.S
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/kernel/prom.c
arch/s390/Kconfig
arch/s390/include/asm/pci.h
arch/s390/include/asm/seccomp.h
arch/s390/lib/spinlock.c
arch/sh/include/asm/smp.h
arch/sh/include/asm/topology.h
arch/sh/kernel/cpu/sh4a/smp-shx3.c
arch/sh/kernel/topology.c
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/setup.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c
block/partition-generic.c
crypto/rsa-pkcs1pad.c
drivers/bcma/main.c
drivers/block/loop.c
drivers/bus/mvebu-mbus.c
drivers/bus/uniphier-system-bus.c
drivers/char/hw_random/bcm63xx-rng.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
drivers/crypto/ccp/ccp-crypto-sha.c
drivers/dma/dw/core.c
drivers/dma/edma.c
drivers/dma/hsu/hsu.c
drivers/dma/hsu/hsu.h
drivers/dma/omap-dma.c
drivers/dma/xilinx/xilinx_vdma.c
drivers/extcon/extcon-palmas.c
drivers/firmware/efi/arm-init.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/radeon/ni_reg.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-lenovo.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/usbhid/hid-core.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c
drivers/isdn/mISDN/socket.c
drivers/lguest/interrupts_and_traps.c
drivers/lguest/lg.h
drivers/lguest/x86/core.c
drivers/mailbox/mailbox-test.c
drivers/mailbox/mailbox-xgene-slimpro.c
drivers/mailbox/mailbox.c
drivers/md/dm-cache-metadata.c
drivers/md/dm.c
drivers/misc/lkdtm.c
drivers/mmc/card/block.c
drivers/mmc/host/sdhci-tegra.c
drivers/mtd/nand/nand_base.c
drivers/net/Kconfig
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/phy/spi_ks8995.c
drivers/net/usb/cdc_mbim.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vrf.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/nvdimm/pmem.c
drivers/nvme/host/pci.c
drivers/pci/access.c
drivers/pci/host/pci-imx6.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.h
drivers/perf/arm_pmu.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_punit_ipc.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pwm/pwm-fsl-ftm.c
drivers/rtc/rtc-ds1307.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/tty/pty.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hcd-pci.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-plat.h
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_uas.h
drivers/usb/storage/usb.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
fs/crypto/crypto.c
fs/debugfs/inode.c
fs/devpts/inode.c
fs/ext4/crypto.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/seq_file.c
include/drm/drm_cache.h
include/linux/devpts_fs.h
include/linux/fscrypto.h
include/linux/mlx4/device.h
include/linux/mm.h
include/linux/pci.h
include/linux/pmem.h
include/linux/rculist_nulls.h
include/linux/seq_file.h
include/linux/usb_usual.h
include/net/cls_cgroup.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/route.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/sound/hda_regmap.h
include/uapi/linux/Kbuild
kernel/bpf/verifier.c
kernel/locking/lockdep.c
kernel/resource.c
lib/assoc_array.c
lib/lz4/lz4defs.h
mm/backing-dev.c
mm/gup.c
mm/nommu.c
net/bridge/netfilter/ebtables.c
net/core/skbuff.c
net/decnet/dn_route.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/route.c
net/ipv6/udp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/rds/cong.c
net/rds/ib_cm.c
net/sched/sch_generic.c
net/sctp/outqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/transport.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/tipc/core.c
net/tipc/core.h
net/tipc/name_distr.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
scripts/asn1_compiler.c
sound/hda/hdac_device.c
sound/hda/hdac_i915.c
sound/hda/hdac_regmap.c
sound/isa/sscape.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/pcxhr/pcxhr_core.c
sound/usb/mixer_maps.c
sound/usb/quirks.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/reuseport_dualstack.c [new file with mode: 0644]
virt/kvm/arm/arch_timer.c
virt/kvm/arm/pmu.c

index 5ca79290eabf0fc65c1a37c03b10fa60ef9e71c4..32eaaca04d9bf501809b0e5719100463f614d37b 100644 (file)
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+       order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
        "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
                 <&ethsys CLK_ETHSYS_GP2>,
                 <&ethsys CLK_ETHSYS_GP1>;
        clock-names = "ethif", "esw", "gp2", "gp1";
-       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
        power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
        resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
        reset-names = "eth";
index 3f6a524cc5ffce28dddb0f2d9e0717098ec6a5a9..32f4a2d6d0b31dc76d458e854c51375c291ea3b0 100644 (file)
@@ -1,13 +1,16 @@
 == Amlogic Meson pinmux controller ==
 
 Required properties for the root node:
- - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl"
+ - compatible: one of "amlogic,meson8-cbus-pinctrl"
+                     "amlogic,meson8b-cbus-pinctrl"
+                     "amlogic,meson8-aobus-pinctrl"
+                     "amlogic,meson8b-aobus-pinctrl"
  - reg: address and size of registers controlling irq functionality
 
 === GPIO sub-nodes ===
 
-The 2 power domains of the controller (regular and always-on) are
-represented as sub-nodes and each of them acts as a GPIO controller.
+The GPIO bank for the controller is represented as a sub-node and it acts as a
+GPIO controller.
 
 Required properties for sub-nodes are:
  - reg: should contain address and size for mux, pull-enable, pull and
@@ -18,10 +21,6 @@ Required properties for sub-nodes are:
  - gpio-controller: identifies the node as a gpio controller
  - #gpio-cells: must be 2
 
-Valid sub-node names are:
- - "banks" for the regular domain
- - "ao-bank" for the always-on domain
-
 === Other sub-nodes ===
 
 Child nodes without the "gpio-controller" represent some desired
@@ -45,7 +44,7 @@ pinctrl-bindings.txt
 === Example ===
 
        pinctrl: pinctrl@c1109880 {
-               compatible = "amlogic,meson8-pinctrl";
+               compatible = "amlogic,meson8-cbus-pinctrl";
                reg = <0xc1109880 0x10>;
                #address-cells = <1>;
                #size-cells = <1>;
@@ -61,15 +60,6 @@ pinctrl-bindings.txt
                        #gpio-cells = <2>;
                };
 
-               gpio_ao: ao-bank@c1108030 {
-                       reg = <0xc8100014 0x4>,
-                             <0xc810002c 0x4>,
-                             <0xc8100024 0x8>;
-                       reg-names = "mux", "pull", "gpio";
-                       gpio-controller;
-                       #gpio-cells = <2>;
-               };
-
                nand {
                        mux {
                                groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
@@ -79,18 +69,4 @@ pinctrl-bindings.txt
                                function = "nand";
                        };
                };
-
-               uart_ao_a {
-                       mux {
-                               groups = "uart_tx_ao_a", "uart_rx_ao_a",
-                                        "uart_cts_ao_a", "uart_rts_ao_a";
-                               function = "uart_ao";
-                       };
-
-                       conf {
-                               pins = "GPIOAO_0", "GPIOAO_1",
-                                      "GPIOAO_2", "GPIOAO_3";
-                               bias-disable;
-                       };
-               };
        };
index 1068ffce9f9125559e301f60f9c0ce6ca40d2374..fdde63a5419cb1942ea1480a761beb963db08479 100644 (file)
@@ -15,9 +15,10 @@ Required properties:
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
 - clocks: Must contain a list of phandle and clock specifier for the rtc
-          and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
-               same order as the clocks property.
+          clock and in the case of a s3c6410 compatible controller, also
+          a source clock.
+- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
+               a "rtc_src" sorted in the same order as the clocks property.
 
 Example:
 
index ecc74fa4bfde8a55d9fee00d96ae5ae4123560e9..0b3de80ec8f69c1aef9dfaa59fb86c14395ba43e 100644 (file)
@@ -4077,6 +4077,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        sector if the number is odd);
                                i = IGNORE_DEVICE (don't bind to this
                                        device);
+                               j = NO_REPORT_LUNS (don't use report luns
+                                       command, uas only);
                                l = NOT_LOCKABLE (don't try to lock and
                                        unlock ejectable media);
                                m = MAX_SECTORS_64 (don't transfer more
index 7d66a8636cb5d16530194445d9000b2d9064197e..5faf514047e9f8f9c52d5a1549221cccc0090a45 100644 (file)
@@ -43,7 +43,7 @@ For the gadget two work under Windows two conditions have to be met:
 First of all, Windows need to detect the gadget as an USB composite
 gadget which on its own have some conditions[4].  If they are met,
 Windows lets USB Generic Parent Driver[5] handle the device which then
-tries to much drivers for each individual interface (sort of, don't
+tries to match drivers for each individual interface (sort of, don't
 get into too many details).
 
 The good news is: you do not have to worry about most of the
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
new file mode 100644 (file)
index 0000000..c281ded
--- /dev/null
@@ -0,0 +1,27 @@
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
+which will be found on future Intel CPUs.
+
+Memory Protection Keys provides a mechanism for enforcing page-based
+protections, but without requiring modification of the page tables
+when an application changes protection domains.  It works by
+dedicating 4 previously ignored bits in each page table entry to a
+"protection key", giving 16 possible keys.
+
+There is also a new user-accessible register (PKRU) with two separate
+bits (Access Disable and Write Disable) for each key.  Being a CPU
+register, PKRU is inherently thread-local, potentially giving each
+thread a different set of protections from every other thread.
+
+There are two new instructions (RDPKRU/WRPKRU) for reading and writing
+to the new register.  The feature is only available in 64-bit mode,
+even though there is theoretically space in the PAE PTEs.  These
+permissions are enforced on data access only and have no effect on
+instruction fetches.
+
+=========================== Config Option ===========================
+
+This config option adds approximately 1.5kb of text. and 50 bytes of
+data to the executable.  A workload which does large O_DIRECT reads
+of holes in XFS files was run to exercise get_user_pages_fast().  No
+performance delta was observed with the config option
+enabled or disabled.
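
As a side note on the new protection-keys document above: each key owns two PKRU bits (Access Disable at bit 2*key, Write Disable at bit 2*key + 1), and the register is read and written with the RDPKRU/WRPKRU instructions. A rough user-space sketch of that layout (added here for illustration, not part of this commit; it assumes a CPU with protection keys and a kernel that has enabled CR4.PKE, otherwise the instructions fault):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t rdpkru(void)
    {
            uint32_t eax, edx;
            /* RDPKRU (0f 01 ee): ECX must be 0; PKRU is returned in EAX */
            asm volatile(".byte 0x0f,0x01,0xee"
                         : "=a" (eax), "=d" (edx)
                         : "c" (0));
            return eax;
    }

    static inline void wrpkru(uint32_t pkru)
    {
            /* WRPKRU (0f 01 ef): writes EAX to PKRU; ECX and EDX must be 0 */
            asm volatile(".byte 0x0f,0x01,0xef"
                         : : "a" (pkru), "c" (0), "d" (0) : "memory");
    }

    int main(void)
    {
            int key = 1;                     /* hypothetical key number */
            uint32_t pkru = rdpkru();

            pkru |= 1u << (2 * key + 1);     /* set Write Disable for key 1 */
            wrpkru(pkru);

            printf("PKRU now 0x%08x\n", rdpkru());
            return 0;
    }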
index 61a323a6b2cfe7348c63f49eadcd858100dc8a03..1d5b4becab6f9a1d3272dc950a16665590e8b6a0 100644 (file)
@@ -6252,8 +6252,8 @@ S:        Maintained
 F:     tools/testing/selftests
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Gleb Natapov <gleb@kernel.org>
 M:     Paolo Bonzini <pbonzini@redhat.com>
+M:     Radim Krčmář <rkrcmar@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://www.linux-kvm.org
 T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
index 1d0aef03eae7fae72f70016575c2f54502706bbb..873411873c036622755caf194037d5e6b2ba17bc 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index 208aae071b378550fa47ade47332ec9351f62e43..12d0284a46e54a6cc74edde0fb5956924673a183 100644 (file)
@@ -593,7 +593,6 @@ config PCI_SYSCALL
        def_bool PCI
 
 source "drivers/pci/Kconfig"
-source "drivers/pci/pcie/Kconfig"
 
 endmenu
 
index ab5d5701e11d448200d3010df229c9f5f57875d9..44a578c10732cd1ab79558415e3c66f6825c98bd 100644 (file)
                        clocks = <&apbclk>;
                        clock-names = "stmmaceth";
                        max-speed = <100>;
-                       mdio0 {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               compatible = "snps,dwmac-mdio";
-                               phy1: ethernet-phy@1 {
-                                       reg = <1>;
-                               };
-                       };
                };
 
                ehci@0x40000 {
index f8b396c9aedb77e23da95d5cd10c0272102b13fe..491b3b5f22bdcad4e787f9770448de4544241a53 100644 (file)
@@ -42,6 +42,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
index 56128ea2b748783a5f0011a62cca2c6f5c9f7218..b25ee73b2e79a7967bfe9c31b0949ff9ece52b4d 100644 (file)
@@ -43,6 +43,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644 (file)
index 0000000..bd3f68c
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+                               unsigned long off)
+{
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+       return 0;
+}
+
+#endif /* _ASM_FB_H_ */
index 6c667fb3544923fb32704bdf4c22ff587c016f5d..4e28d87e935637140af712abf5a661f338437db9 100644 (file)
 };
 
 &cpsw_emac0 {
-       phy_id = <&davinci_mdio>, <0>;
        phy-mode = "rmii";
        dual_emac_res_vlan = <1>;
+       fixed-link {
+               speed = <100>;
+               full-duplex;
+       };
 };
 
 &cpsw_emac1 {
index 6e4f5af3d8f8b607bc0bce45cd419169b0f7945d..344b861a55a5bc6f9652ec1dce8fcb22ad256042 100644 (file)
                        ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
                                   <&edma_tptc2 0>;
 
-                       ti,edma-memcpy-channels = <32 33>;
+                       ti,edma-memcpy-channels = <58 59>;
                };
 
                edma_tptc0: tptc@49800000 {
index 83dfafaaba1bc019fab7432c8003930c7f8ae1e6..d5dd72047a7ed4a4ea102a571085a2e58c47c0b6 100644 (file)
        tx-num-evt = <32>;
        rx-num-evt = <32>;
 };
+
+&synctimer_32kclk {
+       assigned-clocks = <&mux_synctimer32k_ck>;
+       assigned-clock-parents = <&clkdiv32k_ick>;
+};
index 3710755c6d76b52fe339ea8f8f71d7dd8a1dc0ec..85d2c377c3322f6be3a612d85661534771896153 100644 (file)
                        };
 
                        /* USB part of the eSATA/USB 2.0 port */
-                       usb@50000 {
+                       usb@58000 {
                                status = "okay";
                        };
 
index a2ddcb8c545a01ebdcc86d3c011e348f5295619b..45619f6162c56a6260b7d832678b4cc94b019de0 100644 (file)
@@ -91,8 +91,8 @@
                clock-frequency = <141666666>;
        };
 
-       pinctrl: pinctrl@c1109880 {
-               compatible = "amlogic,meson8-pinctrl";
+       pinctrl_cbus: pinctrl@c1109880 {
+               compatible = "amlogic,meson8-cbus-pinctrl";
                reg = <0xc1109880 0x10>;
                #address-cells = <1>;
                #size-cells = <1>;
                        #gpio-cells = <2>;
                };
 
-               gpio_ao: ao-bank@c1108030 {
-                       reg = <0xc8100014 0x4>,
-                             <0xc810002c 0x4>,
-                             <0xc8100024 0x8>;
-                       reg-names = "mux", "pull", "gpio";
-                       gpio-controller;
-                       #gpio-cells = <2>;
-               };
-
-               uart_ao_a_pins: uart_ao_a {
-                       mux {
-                               groups = "uart_tx_ao_a", "uart_rx_ao_a";
-                               function = "uart_ao";
-                       };
-               };
-
-               i2c_ao_pins: i2c_mst_ao {
-                       mux {
-                               groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
-                               function = "i2c_mst_ao";
-                       };
-               };
-
                spi_nor_pins: nor {
                        mux {
                                groups = "nor_d", "nor_q", "nor_c", "nor_cs";
                };
        };
 
+       pinctrl_aobus: pinctrl@c8100084 {
+               compatible = "amlogic,meson8-aobus-pinctrl";
+               reg = <0xc8100084 0xc>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               gpio_ao: ao-bank@c1108030 {
+                       reg = <0xc8100014 0x4>,
+                             <0xc810002c 0x4>,
+                             <0xc8100024 0x8>;
+                       reg-names = "mux", "pull", "gpio";
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               uart_ao_a_pins: uart_ao_a {
+                       mux {
+                               groups = "uart_tx_ao_a", "uart_rx_ao_a";
+                               function = "uart_ao";
+                       };
+               };
+
+               i2c_ao_pins: i2c_mst_ao {
+                       mux {
+                               groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+                               function = "i2c_mst_ao";
+                       };
+               };
+       };
 }; /* end of / */
index 8bad5571af461e86cabc9fe708db6865d0a95774..2bfe401a4da9181a8e029b9a073609ea5856c7fc 100644 (file)
                        reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
                };
 
-               pinctrl: pinctrl@c1109880 {
-                       compatible = "amlogic,meson8b-pinctrl";
+               pinctrl_cbus: pinctrl@c1109880 {
+                       compatible = "amlogic,meson8b-cbus-pinctrl";
                        reg = <0xc1109880 0x10>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                                gpio-controller;
                                #gpio-cells = <2>;
                        };
+               };
+
+               pinctrl_aobus: pinctrl@c8100084 {
+                       compatible = "amlogic,meson8b-aobus-pinctrl";
+                       reg = <0xc8100084 0xc>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges;
 
                        gpio_ao: ao-bank@c1108030 {
                                reg = <0xc8100014 0x4>,
index 2bd9c83300b2bc3810da219e821f79e4385cc98d..421fe9f8a9ebd779fdf79044cde8be1b387cb0dc 100644 (file)
@@ -70,7 +70,7 @@
                compatible = "arm,cortex-a9-twd-timer";
                clocks = <&mpu_periphclk>;
                reg = <0x48240600 0x20>;
-               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_EDGE_RISING)>;
                interrupt-parent = <&gic>;
        };
 
index 07055eacbb0f24a045b04bf61f923da9c7ebb43c..a691d590fbd142fcc15a8761e311dfa283ee363c 100644 (file)
@@ -63,6 +63,9 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_BU21013=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_RMI4_CORE=y
+CONFIG_RMI4_I2C=y
+CONFIG_RMI4_F11=y
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
index b23c6c81c9ad88df191354d926be53cf4810e8f8..1ee94c716a7f87f45c00436fb9b2fa640014c811 100644 (file)
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
        int feature = (features >> field) & 15;
 
        /* feature registers are signed values */
-       if (feature > 8)
+       if (feature > 7)
                feature -= 16;
 
        return feature;
index a28fce0bdbbe11b7a950a3a0943f8362d7124b87..2c4bea39cf224f8368cca2b4dba61a5b807a3dde 100644 (file)
@@ -512,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-            cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+            cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
 }
 
index b5384311dec4f2f45f24eaec40d3edaa16b8fea4..dded1b763c164c029432860c9a4fb9332ba9a300 100644 (file)
@@ -1112,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
 {
        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+       cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1141,9 +1148,7 @@ static int init_subsystems(void)
        /*
         * Register CPU Hotplug notifier
         */
-       cpu_notifier_register_begin();
-       err = __register_cpu_notifier(&hyp_init_cpu_nb);
-       cpu_notifier_register_done();
+       err = register_cpu_notifier(&hyp_init_cpu_nb);
        if (err) {
                kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
                return err;
@@ -1193,6 +1198,8 @@ static void teardown_hyp_mode(void)
        free_hyp_pgds();
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+       unregister_cpu_notifier(&hyp_init_cpu_nb);
+       hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
index d85c24918c177d176fd191aa1ebcfaff5ce90f74..2abd53ae3e7a13f5801b7680ebd65464f2bede5c 100644 (file)
@@ -669,9 +669,9 @@ void __init dra7xxx_check_revision(void)
                case 0:
                        omap_revision = DRA722_REV_ES1_0;
                        break;
+               case 1:
                default:
-                       /* If we have no new revisions */
-                       omap_revision = DRA722_REV_ES1_0;
+                       omap_revision = DRA722_REV_ES2_0;
                        break;
                }
                break;
index 3c87e40650cf374daf3083437c8ec614fdbc136e..9821be6dfd5ed95afcf996ac02d3a9da1257564a 100644 (file)
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
 void __init dra7xx_map_io(void)
 {
        iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+       omap_barriers_init();
 }
 #endif
 /*
index b6d62e4cdfddaf53f8d3aa7c1768b5e83255673e..2af6ff63e3b483f197d35761d43dfee90dcafe73 100644 (file)
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
            (sf & SYSC_HAS_CLOCKACTIVITY))
                _set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-       /* If the cached value is the same as the new value, skip the write */
-       if (oh->_sysc_cache != v)
-               _write_sysconfig(v, oh);
+       _write_sysconfig(v, oh);
 
        /*
         * Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
                _set_master_standbymode(oh, idlemode, &v);
        }
 
-       _write_sysconfig(v, oh);
+       /* If the cached value is the same as the new value, skip the write */
+       if (oh->_sysc_cache != v)
+               _write_sysconfig(v, oh);
 }
 
 /**
index 39736ad2a7548d41cc2ca078084c6e04e414bf56..df8327713d06566eeb58d8691fcc1ffc2e5952d6 100644 (file)
@@ -582,9 +582,11 @@ static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
        .user           = OCP_USER_MPU,
 };
 
+/* USB needs udelay 1 after reset at least on hp t410, use 2 for margin */
 static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x10,
+       .srst_udelay    = 2,
        .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
                                SYSC_HAS_SOFTRESET,
        .idlemodes      = SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
index 70df8f6cddccbf1ad7b8372eb847c2a2bdfc755c..364418c78bf37a07151a3213fec5b8a926ecd65f 100644 (file)
@@ -489,6 +489,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA752_REV_ES2_0       (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0       (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 #define DRA722_REV_ES1_0       (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
+#define DRA722_REV_ES2_0       (DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
index 913a319c7b005b0d1c5832ed57e2a1185b182543..fffb697bbf0e3aa85c3eace051a23e3015271a8b 100644 (file)
@@ -1235,5 +1235,6 @@ static struct platform_device pxa2xx_pxa_dma = {
 void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
 {
        pxa_dma_pdata.dma_channels = nb_channels;
+       pxa_dma_pdata.nb_requestors = nb_requestors;
        pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
 }
index c6f6ed1cbed08a7f0ebfa40d670ab95b7b1b2d88..36e3c79f497321f580166a7c332e714fb985537b 100644 (file)
@@ -61,10 +61,7 @@ config SA1100_H3100
        select MFD_IPAQ_MICRO
        help
          Say Y here if you intend to run this kernel on the Compaq iPAQ
-         H3100 handheld computer.  Information about this machine and the
-         Linux port to this machine can be found at:
-
-         <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100>
+         H3100 handheld computer.
 
 config SA1100_H3600
        bool "Compaq iPAQ H3600/H3700"
@@ -73,10 +70,7 @@ config SA1100_H3600
        select MFD_IPAQ_MICRO
        help
          Say Y here if you intend to run this kernel on the Compaq iPAQ
-         H3600 handheld computer.  Information about this machine and the
-         Linux port to this machine can be found at:
-
-         <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
+         H3600 and H3700 handheld computers.
 
 config SA1100_BADGE4
        bool "HP Labs BadgePAD 4"
index 69141357afe81991595f81348df9f2f0964db06f..db04142f88bc3fab0519a0e21db5ed0a022c19c7 100644 (file)
@@ -120,7 +120,7 @@ static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
        if (ret)
                return ret;
 
-       uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, sizeof(SZ_4));
+       uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
        if (!uniphier_smp_rom_boot_rsv2) {
                pr_err("failed to map ROM_BOOT_RSV2 register\n");
                return -ENOMEM;
index deac58d5f1f7cf053ca215ec718c6902961a7908..c941e93048ad4d2ba09dabc2bb9451eaabcf8760 100644 (file)
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        if (!mask)
                return NULL;
 
-       buf = kzalloc(sizeof(*buf), gfp);
+       buf = kzalloc(sizeof(*buf),
+                     gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
        if (!buf)
                return NULL;
 
index c49b5a85809c5bc96d48f3b8799b965a8f96e860..85820e2bca9df4ce72f63f69670d88586c23930a 100644 (file)
                reg = <0x0 0x30000000  0x0 0x10000000>;
                reg-names = "PCI ECAM";
 
-                         /* IO 0x4000_0000 - 0x4001_0000 */
-               ranges = <0x01000000 0 0x40000000 0 0x40000000 0 0x00010000
-                         /* MEM 0x4800_0000 - 0x5000_0000 */
-                         0x02000000 0 0x48000000 0 0x48000000 0 0x08000000
-                         /* MEM64 pref 0x6_0000_0000 - 0x7_0000_0000 */
-                         0x43000000 6 0x00000000 6 0x00000000 1 0x00000000>;
+               /*
+                * PCI ranges:
+                *   IO         no supported
+                *   MEM        0x4000_0000 - 0x6000_0000
+                *   MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
+                */
+               ranges =
+                 <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
+                  0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map =
                      /* addr  pin  ic   icaddr  icintr */
index 4150fd8bae0144c71e1bd956c9b56fe8e9317438..3f29887995bcdc6844d29b80fec33e4e1279fc3b 100644 (file)
  */
 #define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
                                 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-                                VTCR_EL2_RES1)
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X                (38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
  */
 #define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
                                 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-                                VTCR_EL2_RES1)
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X                (37 - VTCR_EL2_T0SZ_40B)
 #endif
 
index eb7490d232a0f39328c42fd646de8b5548feb178..40a0a24e6c98c24b3d6aceeb1e979706cf45163c 100644 (file)
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
 
index b7e82a795ac9e087098c957b42e717c0fad0985c..f5c6bd2541ef4d16c6f3b26cee01cc013d50055d 100644 (file)
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-       kvm_call_hyp(__init_stage2_translation);
+       u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+       WARN_ONCE(parange < 40,
+                 "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
index 4203d5f257bcf396f8cdadb4f613d7048b486f59..85da0f599cd6ac174c52eb1a8b376d171686c718 100644 (file)
@@ -588,6 +588,15 @@ set_hcr:
        msr     vpidr_el2, x0
        msr     vmpidr_el2, x1
 
+       /*
+        * When VHE is not in use, early init of EL2 and EL1 needs to be
+        * done here.
+        * When VHE _is_ in use, EL1 will not be used in the host and
+        * requires no configuration, and all non-hyp-specific EL2 setup
+        * will be done via the _EL1 system register aliases in __cpu_setup.
+        */
+       cbnz    x2, 1f
+
        /* sctlr_el1 */
        mov     x0, #0x0800                     // Set/clear RES{1,0} bits
 CPU_BE(        movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE(     movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        /* Coprocessor traps. */
        mov     x0, #0x33ff
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
 
        .macro  update_early_cpu_boot_status status, tmp1, tmp2
        mov     \tmp2, #\status
-       str_l   \tmp2, __early_cpu_boot_status, \tmp1
+       adr_l   \tmp1, __early_cpu_boot_status
+       str     \tmp2, [\tmp1]
        dmb     sy
        dc      ivac, \tmp1                     // Invalidate potentially stale cache line
        .endm
index aef3605a8c47845ce2ce2d32f75e0fd59a8e4405..18a71bcd26ee4a15ea63e9f85102713f644495d5 100644 (file)
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
        struct device_node *dn;
+       int ret;
 
        dn = of_get_cpu_node(cpu, NULL);
        if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
        /*
         * Determine the address from which the CPU is polling.
         */
-       if (of_property_read_u64(dn, "cpu-release-addr",
-                                &cpu_release_addr[cpu])) {
+       ret = of_property_read_u64(dn, "cpu-release-addr",
+                                  &cpu_release_addr[cpu]);
+       if (ret)
                pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
                       cpu);
 
-               return -1;
-       }
+       of_node_put(dn);
 
-       return 0;
+       return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
index 5a9f3bf542b099ec90185446ec300812a9db64dd..bcbe761a5a3d1fa579fb211d89c165ace3b7c5d1 100644 (file)
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
        u64 val = VTCR_EL2_FLAGS;
+       u64 parange;
        u64 tmp;
 
        /*
@@ -30,7 +31,39 @@ void __hyp_text __init_stage2_translation(void)
         * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
         * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
         */
-       val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+       parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+       val |= parange << 16;
+
+       /* Compute the actual PARange... */
+       switch (parange) {
+       case 0:
+               parange = 32;
+               break;
+       case 1:
+               parange = 36;
+               break;
+       case 2:
+               parange = 40;
+               break;
+       case 3:
+               parange = 42;
+               break;
+       case 4:
+               parange = 44;
+               break;
+       case 5:
+       default:
+               parange = 48;
+               break;
+       }
+
+       /*
+        * ... and clamp it to 40 bits, unless we have some braindead
+        * HW that implements less than that. In all cases, we'll
+        * return that value for the rest of the kernel to decide what
+        * to do.
+        */
+       val |= 64 - (parange > 40 ? 40 : parange);
 
        /*
         * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
@@ -42,4 +75,6 @@ void __hyp_text __init_stage2_translation(void)
                        VTCR_EL2_VS_8BIT;
 
        write_sysreg(val, vtcr_el2);
+
+       return parange;
 }
index 8832083e1cb88f6573c8a1ced391cb3d7aa1c527..b515809be2b961a20260579bdfd6cc8d86208643 100644 (file)
@@ -158,11 +158,6 @@ static int mcfgpio_to_irq(struct gpio_chip *chip, unsigned offset)
                return -EINVAL;
 }
 
-static struct bus_type mcfgpio_subsys = {
-       .name           = "gpio",
-       .dev_name       = "gpio",
-};
-
 static struct gpio_chip mcfgpio_chip = {
        .label                  = "mcfgpio",
        .request                = mcfgpio_request,
@@ -178,8 +173,7 @@ static struct gpio_chip mcfgpio_chip = {
 
 static int __init mcfgpio_sysinit(void)
 {
-       gpiochip_add_data(&mcfgpio_chip, NULL);
-       return subsys_system_register(&mcfgpio_subsys, NULL);
+       return gpiochip_add_data(&mcfgpio_chip, NULL);
 }
 
 core_initcall(mcfgpio_sysinit);
index d1fc4796025edb8769ee01195e64b91821f3625a..3ee6976f60885e5b2e0e4e2a80df6d824de14694 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -64,7 +63,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -359,6 +359,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -452,6 +453,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -468,6 +470,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -549,6 +552,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -557,7 +561,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -565,12 +568,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -594,7 +594,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 9bfe8be3658c18231ca4473d8c075a1fb2ac4d5e..e96787ffcbced33f9893d2760259177086b13c1f 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -411,6 +412,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -427,6 +429,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -508,6 +511,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -516,7 +520,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -524,12 +527,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -553,7 +553,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index ebdcfae555801cd1c7367d4ca40974b3d39099cb..083fe6beac149da81250ca4fccd50798f9e4ef0d 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -350,6 +350,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -432,6 +433,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -448,6 +450,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -529,6 +532,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -537,7 +541,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -545,12 +548,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -574,7 +574,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 8acc65e54995388614666716febdab1dda706376..475130c06dcba04b646ec2e7c199aee7045dea7d 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 0c6a3d52b26e2b2559f24963040d3ba65a91ed47..4339658c200f3eb8af4bc2645836e109466055cb 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -413,6 +414,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -429,6 +431,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -510,6 +513,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -518,7 +522,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -526,12 +529,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 12a8a6cb32f4914f06c1f5d4c8e5dd6ba31381ef..831cc8c3a2e259f67d318257e72bcab84e36b8c1 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -61,7 +60,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -357,6 +357,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -435,6 +436,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -451,6 +453,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -532,6 +535,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -540,7 +544,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -548,12 +551,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -577,7 +577,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 64ff2dcb34c89a3e60e26f9c468d654ed943e97f..6377afeb522bbb9a235d1bd57f36cc14ca36ac5b 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -71,7 +70,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -295,7 +293,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -390,6 +390,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -515,6 +516,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -531,6 +533,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -612,6 +615,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -620,7 +624,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -628,12 +631,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -657,7 +657,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 07fc6abcfe0c50e4b656a63a9da36c728b13cec4..4304b3d56262bc677383ba4389b65d5e9a7625cd 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -59,7 +58,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -280,7 +278,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -339,6 +339,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 69903ded88f71d1d51ad4b430acbd2f745fdf867..074bda4094ffd5b0913d4411371e2597774faab2 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index bd8401686ddef143bf036159cb3f4ea650772f32..07b9fa8d7f2ea71dd09a26c8d9abe4a519baa07c 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -346,6 +346,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -426,6 +427,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -442,6 +444,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -523,6 +526,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -531,7 +535,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -539,12 +542,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -568,7 +568,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 5f9fb3ab9636808d46b75f3696f477ab91e6dd79..36e6fae02d458e2dcb1a96c0caee68a5301f0341 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -509,7 +513,6 @@ CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -517,12 +520,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -546,7 +546,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index 5d1c674530e2ba73ca43ffc4940f139772962152..903acf929511e5066403bfdd1c4d78bbc4084c35 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -510,7 +514,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -518,12 +521,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -547,7 +547,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
index bafaff6dcd7bda8a28101f140159f9a7a76638db..a857d82ec5094abc30e353f25370365194a01194 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            377
+#define NR_syscalls            379
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 0ca729665f29e9d67851aed2c1c52be75bbd078b..9fe674bf911fd2a4e61d7119f9b91ffbdddf44f5 100644 (file)
 #define __NR_membarrier                374
 #define __NR_mlock2            375
 #define __NR_copy_file_range   376
+#define __NR_preadv2           377
+#define __NR_pwritev2          378
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 8bb94261ff97d953fcfbe7c305e6a7ecfce27a97..d6fd6d9ced2474ab477b0becefbd5f695d256e96 100644 (file)
@@ -397,3 +397,5 @@ ENTRY(sys_call_table)
        .long sys_membarrier
        .long sys_mlock2                /* 375 */
        .long sys_copy_file_range
+       .long sys_preadv2
+       .long sys_pwritev2
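The hunks above (unistd.h and syscalltable.S) wire up the new preadv2()/pwritev2() system calls as numbers 377 and 378 on m68k and raise NR_syscalls to 379. As a rough illustration of what the new calls add over preadv()/pwritev() — a per-call flags argument on top of positioned vectored I/O — here is a hedged userspace sketch; it assumes a libc that already exposes a preadv2() wrapper, which is not part of this commit:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char buf1[32], buf2[32];
        struct iovec iov[2] = {
                { .iov_base = buf1, .iov_len = sizeof(buf1) },
                { .iov_base = buf2, .iov_len = sizeof(buf2) },
        };
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0)
                return 1;
        /* flags = 0 behaves like preadv(); RWF_HIPRI etc. are optional extras */
        ssize_t n = preadv2(fd, iov, 2, 0, 0);
        if (n < 0)
                perror("preadv2");
        else
                printf("read %zd bytes into two buffers\n", n);
        close(fd);
        return 0;
}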
index bd3c873951a1ad2e890a77a57172d2ea7aedee71..88cfaa8af78ea2af3192472920100a4ac1de8207 100644 (file)
@@ -4,8 +4,8 @@ config PARISC
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
-       select HAVE_FUNCTION_TRACER if 64BIT
-       select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
        select ARCH_WANT_FRAME_POINTERS
        select RTC_CLASS
        select RTC_DRV_GENERIC
index bc989e522a045c17ca4514478b7cb5ef9d77fc4d..68b7cbd0810a77bf321f524b9c8809b508f864d6 100644 (file)
@@ -2,9 +2,13 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config TRACE_IRQFLAGS_SUPPORT
+       def_bool y
+
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
+       default y
        help
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
index 965a0999fc4c081228a34a61daec7f1ef032a107..75cb451b1f03069ccd2e77fae86cb1e4da6b7922 100644 (file)
@@ -62,9 +62,7 @@ cflags-y      += -mdisable-fpregs
 
 # Without this, "ld -r" results in .text sections that are too big
 # (> 0x40000) for branches to reach stubs.
-ifndef CONFIG_FUNCTION_TRACER
-  cflags-y     += -ffunction-sections
-endif
+cflags-y       += -ffunction-sections
 
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
index 544ed8ef87ebbb4da274c7d8fb8e82464fe3fcda..24cd81d58d706faafe469c10a85f1182a28b958a 100644 (file)
@@ -4,23 +4,7 @@
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 
-/*
- * Stack of return addresses for functions of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-       unsigned long ret;
-       unsigned long func;
-       unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry.S
- */
-extern void return_to_handler(void);
-
+#define MCOUNT_INSN_SIZE 4
 
 extern unsigned long return_address(unsigned int);
 
index ff87b4603e3dc7d2b60caa2a0047e3eada177397..69a11183d48d4da5cf6f9d961d31b7cb91589480 100644 (file)
@@ -15,11 +15,7 @@ ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_irq.o = -pg
-CFLAGS_REMOVE_pacache.o = -pg
 CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_traps.o = -pg
-CFLAGS_REMOVE_unaligned.o = -pg
 CFLAGS_REMOVE_unwind.o = -pg
 endif
 
index 623496c117564cdbc7f939dea4ff777e114212ac..39127d3e70e56f2295b6e288f6642ed9908bcfbd 100644 (file)
@@ -1970,43 +1970,98 @@ pt_regs_ok:
        b       intr_restore
        copy    %r25,%r16
 
-       .import schedule,code
 syscall_do_resched:
-       BL      schedule,%r2
+       load32  syscall_check_resched,%r2 /* if resched, we start over again */
+       load32  schedule,%r19
+       bv      %r0(%r19)               /* jumps to schedule() */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #else
        nop
 #endif
-       b       syscall_check_resched   /* if resched, we start over again */
-       nop
 ENDPROC(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
+
        .import ftrace_function_trampoline,code
-ENTRY(_mcount)
-       copy    %r3, %arg2
+       .align L1_CACHE_BYTES
+       .globl mcount
+       .type  mcount, @function
+ENTRY(mcount)
+_mcount:
+       .export _mcount,data
+       .proc
+       .callinfo caller,frame=0
+       .entry
+       /*
+        * The 64bit mcount() function pointer needs 4 dwords, of which the
+        * first two are free.  We optimize it here and put 2 instructions for
+        * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+        * have all on one L1 cacheline.
+        */
        b       ftrace_function_trampoline
+       copy    %r3, %arg2      /* caller original %sp */
+ftrace_stub:
+       .globl ftrace_stub
+        .type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+       bve     (%rp)
+#else
+       bv      %r0(%rp)
+#endif
        nop
-ENDPROC(_mcount)
+#ifdef CONFIG_64BIT
+       .dword mcount
+       .dword 0 /* code in head.S puts value of global gp here */
+#endif
+       .exit
+       .procend
+ENDPROC(mcount)
 
+       .align 8
+       .globl return_to_handler
+       .type  return_to_handler, @function
 ENTRY(return_to_handler)
-       load32  return_trampoline, %rp
-       copy    %ret0, %arg0
-       copy    %ret1, %arg1
-       b       ftrace_return_to_handler
-       nop
-return_trampoline:
-       copy    %ret0, %rp
-       copy    %r23, %ret0
-       copy    %r24, %ret1
+       .proc
+       .callinfo caller,frame=FRAME_SIZE
+       .entry
+       .export parisc_return_to_handler,data
+parisc_return_to_handler:
+       copy %r3,%r1
+       STREG %r0,-RP_OFFSET(%sp)       /* store 0 as %rp */
+       copy %sp,%r3
+       STREGM %r1,FRAME_SIZE(%sp)
+       STREG %ret0,8(%r3)
+       STREG %ret1,16(%r3)
 
-.globl ftrace_stub
-ftrace_stub:
+#ifdef CONFIG_64BIT
+       loadgp
+#endif
+
+       /* call ftrace_return_to_handler(0) */
+#ifdef CONFIG_64BIT
+       ldo -16(%sp),%ret1              /* Reference param save area */
+#endif
+       BL ftrace_return_to_handler,%r2
+       ldi 0,%r26
+       copy %ret0,%rp
+
+       /* restore original return values */
+       LDREG 8(%r3),%ret0
+       LDREG 16(%r3),%ret1
+
+       /* return from function */
+#ifdef CONFIG_64BIT
+       bve     (%rp)
+#else
        bv      %r0(%rp)
-       nop
+#endif
+       LDREGM -FRAME_SIZE(%sp),%r3
+       .exit
+       .procend
 ENDPROC(return_to_handler)
+
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
index 559d400f93859ac2ffe096006a2099f0b2137e62..b13f9ec6f2946506c2b42ef4748b447652db81c1 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  *
  * based on code for x86 which is:
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
+#include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
 
 
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
-{
-       int index;
-
-       if (!current->ret_stack)
-               return -EBUSY;
-
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
-
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
-
-       return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-       int index;
-
-       index = current->curr_ret_stack;
-
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic, otherwise we have no where to go */
-               *ret = (unsigned long)
-                       dereference_function_descriptor(&panic);
-               return;
-       }
-
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
-                                      unsigned long retval1)
-{
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
-
-       pop_return_trace(&trace, &ret);
-       trace.rettime = local_clock();
-       ftrace_graph_return(&trace);
-
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)
-                       dereference_function_descriptor(&panic);
-       }
-
-       /* HACK: we hand over the old functions' return values
-          in %r23 and %r24. Assembly in entry.S will take care
-          and move those to their final registers %ret0 and %ret1 */
-       asm( "copy %0, %%r23 \n\t"
-            "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
-       return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
        unsigned long old;
-       unsigned long long calltime;
        struct ftrace_graph_ent trace;
+       extern int parisc_return_to_handler;
 
        if (unlikely(ftrace_graph_is_dead()))
                return;
@@ -119,64 +36,47 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
 
        old = *parent;
-       *parent = (unsigned long)
-                 dereference_function_descriptor(&return_to_handler);
 
-       if (unlikely(!__kernel_text_address(old))) {
-               ftrace_graph_stop();
-               *parent = old;
-               WARN_ON(1);
-               return;
-       }
-
-       calltime = local_clock();
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
 
-       if (push_return_trace(old, calltime,
-                               self_addr, &trace.depth) == -EBUSY) {
-               *parent = old;
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
                return;
-       }
 
-       trace.func = self_addr;
+        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+                       0 ) == -EBUSY)
+                return;
 
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
-               *parent = old;
-       }
+       /* activate parisc_return_to_handler() as return point */
+       *parent = (unsigned long) &parisc_return_to_handler;
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace ftrace_function_trampoline(unsigned long parent,
                                unsigned long self_addr,
                                unsigned long org_sp_gr3)
 {
-       extern ftrace_func_t ftrace_trace_function;
+       extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+       extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 
        if (ftrace_trace_function != ftrace_stub) {
-               ftrace_trace_function(parent, self_addr);
+               /* struct ftrace_ops *op, struct pt_regs *regs); */
+               ftrace_trace_function(parent, self_addr, NULL, NULL);
                return;
        }
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       if (ftrace_graph_entry && ftrace_graph_return) {
-               unsigned long sp;
+       if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+               ftrace_graph_entry != ftrace_graph_entry_stub) {
                unsigned long *parent_rp;
 
-                asm volatile ("copy %%r30, %0" : "=r"(sp));
-               /* sanity check: is stack pointer which we got from
-                  assembler function in entry.S in a reasonable
-                  range compared to current stack pointer? */
-               if ((sp - org_sp_gr3) > 0x400)
-                       return;
-
                /* calculate pointer to %rp in stack */
-               parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+               parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
                /* sanity check: parent_rp should hold parent */
                if (*parent_rp != parent)
                        return;
-               
+
                prepare_ftrace_return(parent_rp, self_addr);
                return;
        }
index 75aa0db9f69efe2fb3fa00d10d6fc66bd3f2b923..bbbe360b458f511c068620db2dd670a770ea8362 100644 (file)
@@ -129,6 +129,15 @@ $pgt_fill_loop:
        /* And the stack pointer too */
        ldo             THREAD_SZ_ALGN(%r6),%sp
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+       .import _mcount,data
+       /* initialize mcount FPTR */
+       /* Get the global data pointer */
+       loadgp
+       load32          PA(_mcount), %r10
+       std             %dp,0x18(%r10)
+#endif
+
 #ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
index 8dde19962a5b49fbce13685656911c8f47622f3f..f63c96cd360863ac312aca929475de7629695169 100644 (file)
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
                                        0x00000040
 
+/* Reserved - do not use               0x00000004 */
 #define PPC_FEATURE_TRUE_LE            0x00000002
 #define PPC_FEATURE_PPC_LE             0x00000001
 
index 7030b035905dbf85b03bd36fa4bdc2a4a14982de..a15fe1d4e84aec9955622b603823fe44633da98d 100644 (file)
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
        unsigned long   cpu_features;   /* CPU_FTR_xxx bit */
        unsigned long   mmu_features;   /* MMU_FTR_xxx bit */
        unsigned int    cpu_user_ftrs;  /* PPC_FEATURE_xxx bit */
+       unsigned int    cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
        unsigned char   pabyte;         /* byte number in ibm,pa-features */
        unsigned char   pabit;          /* bit number (big-endian) */
        unsigned char   invert;         /* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-       {0, 0, PPC_FEATURE_HAS_MMU,     0, 0, 0},
-       {0, 0, PPC_FEATURE_HAS_FPU,     0, 1, 0},
-       {CPU_FTR_CTRL, 0, 0,            0, 3, 0},
-       {CPU_FTR_NOEXECUTE, 0, 0,       0, 6, 0},
-       {CPU_FTR_NODSISRALIGN, 0, 0,    1, 1, 1},
-       {0, MMU_FTR_CI_LARGE_PAGE, 0,   1, 2, 0},
-       {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_MMU, 0,          0, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_FPU, 0,          0, 1, 0},
+       {CPU_FTR_CTRL, 0, 0, 0,                 0, 3, 0},
+       {CPU_FTR_NOEXECUTE, 0, 0, 0,            0, 6, 0},
+       {CPU_FTR_NODSISRALIGN, 0, 0, 0,         1, 1, 1},
+       {0, MMU_FTR_CI_LARGE_PAGE, 0, 0,                1, 2, 0},
+       {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
        /*
-        * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-        * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-        * which is 0 if the kernel doesn't support TM.
+        * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+        * we don't want to turn on TM here, so we use the *_COMP versions
+        * which are 0 if the kernel doesn't support TM.
         */
-       {CPU_FTR_TM_COMP, 0, 0,         22, 0, 0},
+       {CPU_FTR_TM_COMP, 0, 0,
+        PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features |= fp->mmu_features;
                } else {
                        cur_cpu_spec->cpu_features &= ~fp->cpu_features;
                        cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features &= ~fp->mmu_features;
                }
        }
index aad23e3dff2c17b0bba5a7d2740ea47c21839480..bf24ab1889215abed3b8759dae2d45177ab960ab 100644 (file)
@@ -4,6 +4,9 @@ config MMU
 config ZONE_DMA
        def_bool y
 
+config CPU_BIG_ENDIAN
+       def_bool y
+
 config LOCKDEP_SUPPORT
        def_bool y
 
index b6bfa169a002c051649a2909281fe8c203a00ee7..535a46d46d2894e486284b4a8eb74df38738305f 100644 (file)
@@ -44,7 +44,8 @@ struct zpci_fmb {
        u64 rpcit_ops;
        u64 dma_rbytes;
        u64 dma_wbytes;
-} __packed __aligned(64);
+       u64 pad[2];
+} __packed __aligned(128);
 
 enum zpci_state {
        ZPCI_FN_STATE_RESERVED,
index 781a9cf9b002930429697dd973b214b844a5c244..e10f8337367b31c3de87b045937070e5dd975f94 100644 (file)
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32   __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif /* _ASM_S390_SECCOMP_H */
index d4549c9645892aef684c60ac73a2c8d22886e58e..e5f50a7d2f4eb07fd2037c605e4c7e87160455b2 100644 (file)
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
+                       continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && cpu_is_preempted(~owner)) {
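The one-line fix above adds a continue after local_irq_restore() in arch_spin_lock_wait_flags(): once a compare-and-swap attempt has failed, the loop must start over and re-sample the owner instead of falling through to the later checks with stale state. A hedged, generic userspace sketch of that retry shape (C11 atomics, not the s390 primitives):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock_owner;           /* 0 = free, otherwise cpu id + 1 */

static void spin_lock_cpu(int cpu)
{
        for (;;) {
                if (atomic_load(&lock_owner) == 0) {
                        int expected = 0;
                        if (atomic_compare_exchange_strong(&lock_owner,
                                                           &expected, cpu + 1))
                                return;
                        continue;       /* failed grab: re-sample the owner
                                           before running any later checks */
                }
                /* owner-is-running / directed-yield heuristics would go here */
        }
}

int main(void)
{
        spin_lock_cpu(3);
        printf("lock owner token: %d\n", atomic_load(&lock_owner));
        return 0;
}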
index 1baf0ba962426f8e92b6789aabacaf5c47787014..c9f8bbdb1bf8e901505aca6378705a25db9cb46f 100644 (file)
@@ -34,11 +34,6 @@ enum {
 DECLARE_PER_CPU(int, cpu_state);
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(const struct cpumask *mask);
-
-void local_timer_interrupt(void);
-void local_timer_setup(unsigned int cpu);
-void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
index b0a282d65f6a16bc0753faac736e67adc00f62e7..358e3f516ef6ca78b3c7844d174140cc99f759c8 100644 (file)
@@ -17,7 +17,7 @@
 
 #define mc_capable()    (1)
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
index 4a298808789c46cc3502348b283f3b22b02be7dd..839612c8a0a052e1a1a674ece5dcb7541ce6c467 100644 (file)
@@ -73,8 +73,6 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
 {
        int i;
 
-       local_timer_setup(0);
-
        BUILD_BUG_ON(SMP_MSG_NR >= 8);
 
        for (i = 0; i < SMP_MSG_NR; i++)
index 772caffba22fb644a077bfe8707148f75a1dc14a..c82912a61d74f26a3d3b1b3e47913c899329ecf6 100644 (file)
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 cpumask_t cpu_core_map[NR_CPUS];
 EXPORT_SYMBOL(cpu_core_map);
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+static cpumask_t cpu_coregroup_map(int cpu)
 {
        /*
         * Presently all SH-X3 SMP cores are multi-cores, so just keep it
@@ -30,7 +30,7 @@ static cpumask_t cpu_coregroup_map(unsigned int cpu)
        return *cpu_possible_mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_core_map[cpu];
 }
index 6915ff2bd9962e0b495d0af05c18f51a9382c36e..8774cb23064fe417cf35935c33e876eb9faa9670 100644 (file)
@@ -26,7 +26,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
        vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,6 +40,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If kernel is relocatable, build compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build 64-bit compressed kernel as PIE, we disable relocation
+# overflow check to avoid relocation overflow error with a new linker
+# command-line option, -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+       && echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y    := mkpiggy
index 8ef964ddc18ec656b1e3b0f038adf134484dbdd2..0256064da8da38c69098cbccc75a19cc0493ca82 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC.  When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses.  However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ *   Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
+ * Position Independent Executable (PIE) so that linker won't optimize
+ * R_386_GOT32X relocation to its fixed symbol address.  Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
+ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel.  To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+       .hidden _bss
+       .hidden _ebss
+       .hidden _got
+       .hidden _egot
+
        __HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
index b0c0d16ef58d1099342c97aff83767dd35c73691..86558a1991393c509bc3005c021c2ab490b26b2a 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+       .hidden _bss
+       .hidden _ebss
+       .hidden _got
+       .hidden _egot
+
        __HEAD
        .code32
 ENTRY(startup_32)
index a8a0224fa0f8a4682f76281034a3172001f50200..081255cea1ee5d442a75529172e097afce7f396c 100644 (file)
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index 0a850100c5944641717c797e5d4f129b2cf9a79f..2658e2af74ec4c3f433b9958b4498d7f7d603fed 100644 (file)
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
 void mce_gen_pool_process(void)
 {
        struct llist_node *head;
-       struct mce_evt_llist *node;
+       struct mce_evt_llist *node, *tmp;
        struct mce *mce;
 
        head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
                return;
 
        head = llist_reverse_order(head);
-       llist_for_each_entry(node, head, llnode) {
+       llist_for_each_entry_safe(node, tmp, head, llnode) {
                mce = &node->mce;
                atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
                gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
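The hunk above switches mce_gen_pool_process() from llist_for_each_entry() to llist_for_each_entry_safe(): each node is handed back to the gen_pool inside the loop body, so the iterator must cache the next pointer before the current element is freed. A minimal userspace analogue of that pattern (hand-rolled list, not the kernel llist API):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL;

        /* build a small list: 2 -> 1 -> 0 */
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }

        /* "safe" walk: save the next pointer before freeing the current node */
        struct node *pos, *tmp;
        for (pos = head; pos; pos = tmp) {
                tmp = pos->next;        /* cached before pos is released */
                printf("processing %d\n", pos->val);
                free(pos);              /* analogous to gen_pool_free() */
        }
        return 0;
}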
index 319b08a5b6ed353fb1ca9a03bc1e29626b59677d..2367ae07eb76db48964ef311e6cdc50f8751ecee 100644 (file)
@@ -146,6 +146,31 @@ int default_check_phys_apicid_present(int phys_apicid)
 
 struct boot_params boot_params;
 
+/*
+ * Machine setup..
+ */
+static struct resource data_resource = {
+       .name   = "Kernel data",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource code_resource = {
+       .name   = "Kernel code",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource bss_resource = {
+       .name   = "Kernel bss",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+
 #ifdef CONFIG_X86_32
 /* cpu data as detected by the assembly code in head.S */
 struct cpuinfo_x86 new_cpu_data = {
@@ -924,6 +949,13 @@ void __init setup_arch(char **cmdline_p)
 
        mpx_mm_init(&init_mm);
 
+       code_resource.start = __pa_symbol(_text);
+       code_resource.end = __pa_symbol(_etext)-1;
+       data_resource.start = __pa_symbol(_etext);
+       data_resource.end = __pa_symbol(_edata)-1;
+       bss_resource.start = __pa_symbol(__bss_start);
+       bss_resource.end = __pa_symbol(__bss_stop)-1;
+
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
@@ -987,6 +1019,11 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.resources.probe_roms();
 
+       /* after parse_early_param, so could debug it */
+       insert_resource(&iomem_resource, &code_resource);
+       insert_resource(&iomem_resource, &data_resource);
+       insert_resource(&iomem_resource, &bss_resource);
+
        e820_add_kernel_range();
        trim_bios_range();
 #ifdef CONFIG_X86_32
index 8efb839948e512e9aac6aaf544230195614297c1..bbbaa802d13efc8b1e57f7defa351cacb2aaab78 100644 (file)
@@ -534,6 +534,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
+                               cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
index b70df72e2b33d417307d01f57bca57388650d34f..66b33b96a31b47138c9fef94c20e034d07678cb7 100644 (file)
@@ -173,10 +173,9 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
+       u32 errcode = PFERR_PRESENT_MASK;
 
        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
-       pfec |= PFERR_PRESENT_MASK;
-
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;
 
@@ -189,15 +188,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
 
                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
-               offset = pfec - 1 +
+               offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
 
                pkru_bits &= mmu->pkru_mask >> offset;
-               pfec |= -pkru_bits & PFERR_PK_MASK;
+               errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }
 
-       return -(uint32_t)fault & pfec;
+       return -(u32)fault & errcode;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
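In the permission_fault() change above, the error code bits are now accumulated in a separate errcode instead of being OR-ed into pfec, and the final line keeps the existing branchless select: -(u32)fault is all-ones when fault is true and zero otherwise, so the AND yields either the full error code or 0. A small standalone check of that idiom:

#include <assert.h>
#include <stdint.h>

/* branchless select: return errcode when fault is set, 0 otherwise */
static uint32_t select_errcode(int fault, uint32_t errcode)
{
        return -(uint32_t)fault & errcode;      /* -(1) == 0xffffffff */
}

int main(void)
{
        assert(select_errcode(0, 0x123u) == 0);
        assert(select_errcode(1, 0x123u) == 0x123u);
        return 0;
}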
index 1d971c7553c3847f0d1335487ce551a19875b709..bc019f70e0b6bb374d851bdb3be32ea09e49768b 100644 (file)
@@ -360,7 +360,7 @@ retry_walk:
                        goto error;
 
                if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
-                       errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+                       errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }
 
index 0a2c70e43bc884de800dc792ebeab81fb75478ef..9b7798c7b210e75499644ed1ca35b643fe743208 100644 (file)
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }
-       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
 
        if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        kvm_x86_ops->prepare_guest_switch(vcpu);
        if (vcpu->fpu_active)
                kvm_load_guest_fpu(vcpu);
-       kvm_load_guest_xcr0(vcpu);
-
        vcpu->mode = IN_GUEST_MODE;
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);
 
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
+       kvm_put_guest_xcr0(vcpu);
+
        /* Interrupt is enabled by handle_external_intr() */
        kvm_x86_ops->handle_external_intr(vcpu);
 
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
         * and assume host would use all available bits.
         * Guest xcr0 would be loaded later.
         */
-       kvm_put_guest_xcr0(vcpu);
        vcpu->guest_fpu_loaded = 1;
        __kernel_fpu_begin();
        __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       kvm_put_guest_xcr0(vcpu);
-
        if (!vcpu->guest_fpu_loaded) {
                vcpu->fpu_counter = 0;
                return;
index 2c6ae2aed2c4711072386643d4260a808cb8d17b..d7eb77e1e3a8f4be13f6016ed0e7f7ab7c051da8 100644 (file)
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
                        goto out_del;
        }
 
+       err = hd_ref_init(p);
+       if (err) {
+               if (flags & ADDPART_FLAG_WHOLEDISK)
+                       goto out_remove_file;
+               goto out_del;
+       }
+
        /* everything is up and running, commence */
        rcu_assign_pointer(ptbl->part[partno], p);
 
        /* suppress uevent if the disk suppresses it */
        if (!dev_get_uevent_suppress(ddev))
                kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
-       if (!hd_ref_init(p))
-               return p;
+       return p;
 
 out_free_info:
        free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
 out_free:
        kfree(p);
        return ERR_PTR(err);
+out_remove_file:
+       device_remove_file(pdev, &dev_attr_whole_disk);
 out_del:
        kobject_put(p->holder_dir);
        device_del(pdev);
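The add_partition() hunks above move hd_ref_init() ahead of publishing the partition and add an out_remove_file label so the whole-disk attribute created earlier is also undone on failure; this is the usual goto-unwind idiom where cleanup labels run in reverse order of setup. A hedged standalone sketch of that shape:

#include <stdio.h>
#include <stdlib.h>

/* sketch of the goto-based unwind idiom: each label undoes one setup step */
static int setup_three(char **out_a, char **out_b, char **out_c)
{
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto out_free_a;
        c = malloc(16);
        if (!c)
                goto out_free_b;

        *out_a = a;
        *out_b = b;
        *out_c = c;
        return 0;

out_free_b:                     /* unwinds in reverse order of setup */
        free(b);
out_free_a:
        free(a);
out:
        return -1;
}

int main(void)
{
        char *a, *b, *c;

        if (setup_three(&a, &b, &c) == 0) {
                printf("all three resources set up\n");
                free(c);
                free(b);
                free(a);
        }
        return 0;
}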
index 1cea67d43e1db25c55db64b2f805f035428faa29..ead8dc0d084e749e35733abc164f1e209aca3f6f 100644 (file)
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size ;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
index 786be8fed39e980add0b9e1cddc9e0682171dc9a..1f635471f318cb9d9395e26466ea9abebf6ddb7f 100644 (file)
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
        struct of_phandle_args out_irq;
        int ret;
 
-       if (!parent || !parent->dev.of_node)
+       if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
                return 0;
 
        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
        struct device_node *node;
 
+       if (!IS_ENABLED(CONFIG_OF_IRQ))
+               return;
+
        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;
 
        core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-                               struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-                                          struct bcma_device *core, int num)
-{
-       return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
index 423f4ca7d712dda6f012c32954f19c9ce3af9d9c..80cf8add46ff3667d896fca88aaea3fbf338ad27 100644 (file)
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
        bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
        iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
                      bio_segments(bio), blk_rq_bytes(cmd->rq));
+       /*
+        * This bio may be started from the middle of the 'bvec'
+        * because of bio splitting, so offset from the bvec must
+        * be passed to iov iterator
+        */
+       iter.iov_offset = bio->bi_iter.bi_bvec_done;
 
        cmd->iocb.ki_pos = pos;
        cmd->iocb.ki_filp = file;
index c2e52864bb03a5d862c00f712527df2d25e9217b..ce54a0160faaf4f5a48289e395f98a8c316bdc58 100644 (file)
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
                }
        }
 
-       pr_err("invalid dram address 0x%x\n", phyaddr);
+       pr_err("invalid dram address %pa\n", &phyaddr);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
index 834a2aeaf27a8d80b9dd1df110473dcab4ef154b..350b7309c26d714c1a80c2a56b2a2d9acf1040bb 100644 (file)
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
 
        for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
                for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
-                       if (priv->bank[i].end > priv->bank[j].base ||
+                       if (priv->bank[i].end > priv->bank[j].base &&
                            priv->bank[i].base < priv->bank[j].end) {
                                dev_err(priv->dev,
                                        "region overlap between bank%d and bank%d\n",
index ca9c40309757a7c0f89e510577e9597314dcfcc1..5132c9cde50dd9ed0d635419725961d28d42ccdf 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
+#include <linux/of.h>
 
 #define RNG_CTRL                       0x00
 #define RNG_EN                         (1 << 0)
index b87596b591b3ca9fdf332884700e5a3cc96ebf10..e93405f0eac46565d18c25873d1113c3a1aa65fd 100644 (file)
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
 {
        unsigned int new_freq;
 
+       if (cpufreq_suspended)
+               return 0;
+
        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;
index 8b5a415ee14a53d21e9358e2b6707df852e827b8..30fe323c4551b4628de4c5878500d29eab8d57ca 100644 (file)
@@ -1130,6 +1130,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
                sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
                                      int_tofp(duration_ns));
                core_busy = mul_fp(core_busy, sample_ratio);
+       } else {
+               sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+               if (sample_ratio < int_tofp(1))
+                       core_busy = 0;
        }
 
        cpu->sample.busy_scaled = core_busy;
index 3d9acc53d2473818d1e37c4af432a4afcbdf4ec4..60fc0fa26fd3b19ab557888d9bd6e61c208fe0f6 100644 (file)
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
index b5ad72897dc25b8d11642315f3e087af32d1c845..8f36af62fe951032d22dfd9f7ea0150e43d6360b 100644 (file)
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_sha_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.type = rctx->type;
        state.msg_bits = rctx->msg_bits;
        state.first = rctx->first;
index 5ad0ec1f0e29f750eee5c70889df5427d7413556..97199b3c25a22cd907c1a73648c76a96a594b370 100644 (file)
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-       struct dw_dma_slave *dws = dwc->chan.private;
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
        if (dwc->initialized == true)
                return;
 
-       if (dws) {
-               /*
-                * We need controller-specific data to set up slave
-                * transfers.
-                */
-               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-               cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-               cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-       } else {
-               cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-               cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-       }
+       cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+       cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma_slave *dws = param;
 
-       if (!dws || dws->dma_dev != chan->device->dev)
+       if (dws->dma_dev != chan->device->dev)
                return false;
 
        /* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         * doesn't mean what you think it means), and status writeback.
         */
 
+       /*
+        * We need controller-specific data to set up slave transfers.
+        */
+       if (chan->private && !dw_dma_filter(chan, chan->private)) {
+               dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+               return -EINVAL;
+       }
+
        /* Enable controller here if needed */
        if (!dw->in_use)
                dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
+
+       /* Clear custom channel configuration */
+       dwc->src_id = 0;
+       dwc->dst_id = 0;
+
+       dwc->src_master = 0;
+       dwc->dst_master = 0;
+
        dwc->initialized = false;
 
        /* Disable interrupts */
index ee3463e774f8e4dc5c46e2614719f9d0723cfd06..04070baab78ab0cc7772085dd70828741317dd5a 100644 (file)
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
        struct edma_desc *edesc;
        dma_addr_t src_addr, dst_addr;
        enum dma_slave_buswidth dev_width;
+       bool use_intermediate = false;
        u32 burst;
        int i, ret, nslots;
 
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
         * but the synchronization is difficult to achieve with Cyclic and
         * cannot be guaranteed, so we error out early.
         */
-       if (nslots > MAX_NR_SG)
-               return NULL;
+       if (nslots > MAX_NR_SG) {
+               /*
+                * If the burst and period sizes are the same, we can put
+                * the full buffer into a single period and activate
+                * intermediate interrupts. This will produce interrupts
+                * after each burst, which is also after each desired period.
+                */
+               if (burst == period_len) {
+                       period_len = buf_len;
+                       nslots = 2;
+                       use_intermediate = true;
+               } else {
+                       return NULL;
+               }
+       }
 
        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                /*
                 * Enable period interrupt only if it is requested
                 */
-               if (tx_flags & DMA_PREP_INTERRUPT)
+               if (tx_flags & DMA_PREP_INTERRUPT) {
                        edesc->pset[i].param.opt |= TCINTEN;
+
+                       /* Also enable intermediate interrupts if necessary */
+                       if (use_intermediate)
+                               edesc->pset[i].param.opt |= ITCINTEN;
+               }
        }
 
        /* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-       struct platform_device *tc_pdev;
-       int ret;
-
-       if (!IS_ENABLED(CONFIG_OF) || !tc)
-               return;
-
-       tc_pdev = of_find_device_by_node(tc->node);
-       if (!tc_pdev) {
-               pr_err("%s: TPTC device is not found\n", __func__);
-               return;
-       }
-       if (!pm_runtime_enabled(&tc_pdev->dev))
-               pm_runtime_enable(&tc_pdev->dev);
-
-       if (enable)
-               ret = pm_runtime_get_sync(&tc_pdev->dev);
-       else
-               ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-       if (ret < 0)
-               pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-                      enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
                EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
                echan->hw_triggered ? "HW" : "SW");
 
-       edma_tc_set_pm_state(echan->tc, true);
-
        return 0;
 
 err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
                echan->alloced = false;
        }
 
-       edma_tc_set_pm_state(echan->tc, false);
        echan->tc = NULL;
        echan->hw_triggered = false;
 
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
        int i;
 
        for (i = 0; i < ecc->num_channels; i++) {
-               if (echan[i].alloced) {
+               if (echan[i].alloced)
                        edma_setup_interrupt(&echan[i], false);
-                       edma_tc_set_pm_state(echan[i].tc, false);
-               }
        }
 
        return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
 
                        /* Set up channel -> slot mapping for the entry slot */
                        edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-                       edma_tc_set_pm_state(echan[i].tc, true);
                }
        }
 
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
 
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-       return 0;
+       pm_runtime_enable(&pdev->dev);
+       return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
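
The runtime-PM calls dropped from edma_tc_set_pm_state() move into the TPTC probe, so each transfer controller is simply held active for the lifetime of the driver. A hedged sketch of that probe pattern with the usual error handling spelled out (the hunk above returns pm_runtime_get_sync() directly; names here are illustrative):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_tptc_probe(struct platform_device *pdev)
    {
            int ret;

            pm_runtime_enable(&pdev->dev);

            /* Keep the TPTC powered for as long as the driver is bound */
            ret = pm_runtime_get_sync(&pdev->dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(&pdev->dev);
                    pm_runtime_disable(&pdev->dev);
                    return ret;
            }

            return 0;
    }
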
index eef145edb936816f3686d343742a709392a5dffa..ee510515ce187af75d3ba873b65d5de26cc3acb4 100644 (file)
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
        if (hsuc->direction == DMA_MEM_TO_DEV) {
                bsr = config->dst_maxburst;
-               mtsr = config->dst_addr_width;
+               mtsr = config->src_addr_width;
        } else if (hsuc->direction == DMA_DEV_TO_MEM) {
                bsr = config->src_maxburst;
-               mtsr = config->src_addr_width;
+               mtsr = config->dst_addr_width;
        }
 
        hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
        sr = hsu_chan_readl(hsuc, HSU_CH_SR);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-       return sr;
+       return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
        struct hsu_dma_desc *desc = hsuc->desc;
-       size_t bytes = desc->length;
+       size_t bytes = 0;
        int i;
 
-       i = desc->active % HSU_DMA_CHAN_NR_DESC;
+       for (i = desc->active; i < desc->nents; i++)
+               bytes += desc->sg[i].len;
+
+       i = HSU_DMA_CHAN_NR_DESC - 1;
        do {
                bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
        } while (--i >= 0);
index 578a8ee8cd054429b3e3fd6da2341b49020721cc..6b070c22b1dfc2bca1dc04ca12218de17d1f0d70 100644 (file)
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)    BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY   (BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE          BIT(15)
+#define HSU_CH_SR_DESCE(x)     BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY    (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY    (BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA          BIT(0)
index 43bd5aee7ffe093d78e6f8c1f43b75084f8f1c8e..1e984e18c1266b74fc01c247fad36f3e132ad587 100644 (file)
@@ -48,6 +48,7 @@ struct omap_chan {
        unsigned dma_sig;
        bool cyclic;
        bool paused;
+       bool running;
 
        int dma_ch;
        struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 
        /* Enable channel */
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+       c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
 
                omap_dma_chan_write(c, CLNK_CTRL, val);
        }
+
+       c->running = false;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
-       uint32_t ccr;
        unsigned long flags;
 
-       ccr = omap_dma_chan_read(c, CCR);
-       /* The channel is no longer active, handle the completion right away */
-       if (!(ccr & CCR_ENABLE))
-               omap_dma_callback(c->dma_ch, 0, c);
-
        ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (!c->paused && c->running) {
+               uint32_t ccr = omap_dma_chan_read(c, CCR);
+               /*
+                * The channel is no longer active, set the return value
+                * accordingly
+                */
+               if (!(ccr & CCR_ENABLE))
+                       ret = DMA_COMPLETE;
+       }
+
        if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
        d->ccr = c->ccr;
        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-       d->cicr = CICR_DROP_IE;
-       if (tx_flags & DMA_PREP_INTERRUPT)
-               d->cicr |= CICR_FRAME_IE;
+       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
        d->csdp = data_type;
 
index 0ee0321868d36b3054acac0a02356e09cc0749a1..ef67f278e076da61c3e08428a70bffd0d8f9341c 100644 (file)
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
        struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
        int chan_id = dma_spec->args[0];
 
-       if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+       if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
                return NULL;
 
        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
index 841a4b58639543a4d5107dfaab9544b0a5425d37..8b3226dca1d9d25e004bbb2a030a7d77e9ff48d2 100644 (file)
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
                                                palmas_vbus_irq_handler,
                                                IRQF_TRIGGER_FALLING |
                                                IRQF_TRIGGER_RISING |
-                                               IRQF_ONESHOT |
-                                               IRQF_EARLY_RESUME,
+                                               IRQF_ONESHOT,
                                                "palmas_usb_vbus",
                                                palmas_usb);
                if (status < 0) {
index aa1f743152a2fd317a82c8ac7ca3daf36e865b46..8714f8c271babfff5566765605b5429f33bfa5d8 100644 (file)
@@ -203,7 +203,19 @@ void __init efi_init(void)
 
        reserve_regions();
        early_memunmap(memmap.map, params.mmap_size);
-       memblock_mark_nomap(params.mmap & PAGE_MASK,
-                           PAGE_ALIGN(params.mmap_size +
-                                      (params.mmap & ~PAGE_MASK)));
+
+       if (IS_ENABLED(CONFIG_ARM)) {
+               /*
+                * ARM currently does not allow ioremap_cache() to be called on
+                * memory regions that are covered by struct page. So remove the
+                * UEFI memory map from the linear mapping.
+                */
+               memblock_mark_nomap(params.mmap & PAGE_MASK,
+                                   PAGE_ALIGN(params.mmap_size +
+                                              (params.mmap & ~PAGE_MASK)));
+       } else {
+               memblock_reserve(params.mmap & PAGE_MASK,
+                                PAGE_ALIGN(params.mmap_size +
+                                           (params.mmap & ~PAGE_MASK)));
+       }
 }
index 62a778012fe06cc572964b463f873c987ca34755..1bcbade479dc2e384d4abf0481f5571c95699a16 100644 (file)
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
+       unsigned                fw_version;
        void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
@@ -2034,6 +2035,7 @@ struct amdgpu_device {
 
        /* tracking pinned memory */
        u64 vram_pin_size;
+       u64 invisible_pin_size;
        u64 gart_pin_size;
 
        /* amdkfd interface */
index d6b0bff510aaa719dbb0f9cce269645b06716127..b7b583c42ea82410b2c80825185da124e385219c 100644 (file)
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
        struct acp_pm_domain *apd;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* return early if no ACP */
+       if (!adev->acp.acp_genpd)
+               return 0;
+
        /* SMU block will power on ACP irrespective of ACP runtime status.
         * Power off explicitly based on genpd ACP runtime status so that ACP
         * hw and ACP-genpd status are in sync.
index 598eb0cd5aab6a1b437ad04d0c58dce24adc8d9d..b04337de65d193884ba823f511df802b08aadec6 100644 (file)
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        fw_info.feature = adev->vce.fb_version;
                        break;
                case AMDGPU_INFO_FW_UVD:
-                       fw_info.ver = 0;
+                       fw_info.ver = adev->uvd.fw_version;
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_GMC:
@@ -384,7 +384,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                vram_gtt.vram_size = adev->mc.real_vram_size;
                vram_gtt.vram_size -= adev->vram_pin_size;
                vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
-               vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+               vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
                vram_gtt.gtt_size  = adev->mc.gtt_size;
                vram_gtt.gtt_size -= adev->gart_pin_size;
                return copy_to_user(out, &vram_gtt,
index 8d432e6901af50d2267089757685f2e6f16f4f40..81bd964d3dfc2f12cba2bf0e64b63c677e964d37 100644 (file)
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
        RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
        struct atom_context *atom_context;
        struct card_info *atom_card_info;
        bool mode_config_initialized;
-       struct amdgpu_crtc *crtcs[6];
-       struct amdgpu_afmt *afmt[7];
+       struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+       struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
        /* DAC enable load detect */
index 5b6639faa731e83b1b14c07bf673aa30deff52c8..e557fc1f17c8becfb3ddae209942326d05d5a884 100644 (file)
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);
-               if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+               if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                        bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-               else
+                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                               bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+               } else
                        bo->adev->gart_pin_size += amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
-               if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                        bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-               else
+                       if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+                               bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+               } else
                        bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        } else {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
index 6f3369de232fe5f545382a6cc2ca528bdf8c1026..11af4492b4bee2e3bf29d7910e9d25b3adb5f843 100644 (file)
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+       if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
index 338da80006b66ced7671b5ed63fabb02750355ba..871018c634e0af896aa5a864bd87ee0653e35396 100644 (file)
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                version_major, version_minor, family_id);
 
+       adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+                               (family_id << 8));
+
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_UVD_HANDLES)
                return 0;
 
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
        ptr = adev->uvd.cpu_addr;
 
index 4bec0c108cea9887ed612b252fc6b990547f029a..481a64fa9b470bfa7b2c2290bf6f85b882b4066a 100644 (file)
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index b6f7d7bff92989737511e8cc20452b6eecf89f05..0f14199cf716e157aa5638b7adebc041958225ac 100644 (file)
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
 
        amdgpu_irq_fini(adev);
        amdgpu_ih_ring_fini(adev);
-       amdgpu_irq_add_domain(adev);
+       amdgpu_irq_remove_domain(adev);
 
        return 0;
 }
index 27fbd79d0daf0003be2014cee00c9dcdb49fdb43..e17fbdaf874bd7b7a74e3af2b7e8ee60e4ed8046 100644 (file)
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (!port)
+               return -EINVAL;
+
        port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
        if (!mstb) {
                mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-               if (!mstb)
+               if (!mstb) {
+                       drm_dp_put_port(port);
                        return -EINVAL;
+               }
        }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        kfree(txmsg);
 fail_put:
        drm_dp_put_mst_branch_device(mstb);
+       drm_dp_put_port(port);
        return ret;
 }
 
index 414d7f61aa05cbacb78542b1e3154ea700f6e3bd..558ef9fc39e6c2104b90725950fb7a562e9eefa4 100644 (file)
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 0x0f - 1024x768@43Hz, interlace */
        { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-                  1208, 1264, 0, 768, 768, 772, 817, 0,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                   DRM_MODE_FLAG_INTERLACE) },
        /* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
                   720, 840, 0, 480, 481, 484, 500, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-                  704,  832, 0, 480, 489, 491, 520, 0,
+                  704,  832, 0, 480, 489, 492, 520, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
                   768,  864, 0, 480, 483, 486, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
-       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
        { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
        { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
                   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
-       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
                   1136, 1312, 0,  768, 769, 772, 800, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 {
        int i, j, m, modes = 0;
        struct drm_display_mode *mode;
-       u8 *est = ((u8 *)timing) + 5;
+       u8 *est = ((u8 *)timing) + 6;
 
        for (i = 0; i < 6; i++) {
                for (j = 7; j >= 0; j--) {
index f17d3927959604301e9e53c7614fcb1e4982e80c..baddf33fb475863379bcffcd7befb4da085e37f2 100644 (file)
@@ -94,7 +94,7 @@ comment "Sub-drivers"
 
 config DRM_EXYNOS_G2D
        bool "G2D"
-       depends on !VIDEO_SAMSUNG_S5P_G2D
+       depends on VIDEO_SAMSUNG_S5P_G2D=n
        select FRAME_VECTOR
        help
          Choose this option if you want to use Exynos G2D for DRM.
index 968b31c522b2ddaa9cc8ed7fc4f675f8e39964d8..23d2f958739b187b7bd28ae4e0f7f7ec7522da6f 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-               exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
-               exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+               exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)    += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)       += exynos5433_drm_decon.o
index 7f55ba6771c6b94e5f45bee6bdec078c27c74f5b..011211e4167d41dce17d56e8baee0b168b37e7b6 100644 (file)
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 
 err:
-       list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+       list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
                if (subdrv->close)
                        subdrv->close(dev, subdrv->dev, file);
        }
index d614194644c814211ee069054a492fa1032c8fb0..81cc5537cf2577899bd5c1d011beb404584e08a6 100644 (file)
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
        return exynos_fb->dma_addr[index];
 }
 
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
-       struct exynos_drm_private *private = dev->dev_private;
-       struct drm_fb_helper *fb_helper = private->fb_helper;
-
-       if (fb_helper)
-               drm_fb_helper_hotplug_event(fb_helper);
-       else
-               exynos_drm_fbdev_init(dev);
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
index 4ae860c44f1d8e052c1ce3a721e1c7348d14adf0..72d7c0b7c216531c7f3d7af2d1136ee5a3ecb01b 100644 (file)
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
        drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
 }
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = private->fb_helper;
+
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
+       else
+               exynos_drm_fbdev_init(dev);
+}
index e16d7f0ae1920eb944364747b3f009e7976654f4..330eef87f7180b1b726ff41933874a6568b0474d 100644 (file)
 #ifndef _EXYNOS_DRM_FBDEV_H_
 #define _EXYNOS_DRM_FBDEV_H_
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
 int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
 
 #endif
index 51d484ae9f498c17a07ba43a0891ef8ebe0dd3a4..018449f8d55750d0546ac0891e78662a67a65ab3 100644 (file)
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
         * clock. On these SoCs the bootloader may enable it but any
         * power domain off/on will reset it to disable state.
         */
-       if (ctx->driver_data != &exynos5_fimd_driver_data ||
+       if (ctx->driver_data != &exynos5_fimd_driver_data &&
            ctx->driver_data != &exynos5420_fimd_driver_data)
                return;
 
index 9869d70e9e54af32abb775dc0e17f0c1da83999b..a0def0be6d650de02fad25fc3cf3248d89b8fce0 100644 (file)
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
        } else
                val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
 
-       regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+       ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
        if (ret)
                DRM_ERROR("mic: Failed to read system register\n");
 }
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
                                                        "samsung,disp-syscon");
        if (IS_ERR(mic->sysreg)) {
                DRM_ERROR("mic: Failed to get system register.\n");
+               ret = PTR_ERR(mic->sysreg);
                goto err;
        }
 
index d86227236f5519f93099565622956f00530b7e24..50185ac347b24352202b7d38b314222438ca9b20 100644 (file)
 
 #include <drm/drmP.h>
 
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
 }
 
 static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
 {
        struct drm_plane_state *state = &exynos_state->base;
-       struct drm_crtc *crtc = exynos_state->base.crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_existing_crtc_state(state->state, crtc);
+       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        unsigned int src_x, src_y;
index 20e82008b8b6c683b9189d25699dc33a054b874a..30798cbc6fc08938cc2adc62089148d545c118a6 100644 (file)
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_display_resume(dev);
-
        intel_dp_mst_resume(dev);
 
+       intel_display_resume(dev);
+
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
index 10480939159c24435768a535b9f234fe00cbf387..daba7ebb969903d11dbf6d20a0e932bfc922f128 100644 (file)
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-                                                 IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+                                                IS_SKL_GT3(dev) || \
+                                                IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
index 18ba8139e922898e89d11423899bc0b4176b73fe..4d30b60defda44adcbc24eea0be77d1e9278e804 100644 (file)
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
-               down_read(&mm->mmap_sem);
-               while (pinned < npages) {
-                       ret = get_user_pages_remote(work->task, mm,
-                                       obj->userptr.ptr + pinned * PAGE_SIZE,
-                                       npages - pinned,
-                                       !obj->userptr.read_only, 0,
-                                       pvec + pinned, NULL);
-                       if (ret < 0)
-                               break;
-
-                       pinned += ret;
+               ret = -EFAULT;
+               if (atomic_inc_not_zero(&mm->mm_users)) {
+                       down_read(&mm->mmap_sem);
+                       while (pinned < npages) {
+                               ret = get_user_pages_remote
+                                       (work->task, mm,
+                                        obj->userptr.ptr + pinned * PAGE_SIZE,
+                                        npages - pinned,
+                                        !obj->userptr.read_only, 0,
+                                        pvec + pinned, NULL);
+                               if (ret < 0)
+                                       break;
+
+                               pinned += ret;
+                       }
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
                }
-               up_read(&mm->mmap_sem);
        }
 
        mutex_lock(&dev->struct_mutex);
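
The userptr worker change above follows the standard pattern for using an mm_struct from a context that holds a reference on the object but not on the mm itself: bump mm_users only if it is still non-zero, do the work under mmap_sem, then drop the reference. A minimal sketch of that pattern as it looks in this kernel generation (mmget_not_zero() did not exist yet; function name is illustrative):

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int example_pin_user_pages(struct mm_struct *mm)
    {
            int ret = -EFAULT;

            /* Fails if the last user already called mmput() */
            if (atomic_inc_not_zero(&mm->mm_users)) {
                    down_read(&mm->mmap_sem);
                    /* ... get_user_pages_remote() loop goes here ... */
                    ret = 0;
                    up_read(&mm->mmap_sem);
                    mmput(mm);
            }

            return ret;
    }
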
index d1a46ef5ab3f4b051b42599805266cd5fbd2bc7e..1c212205d0e7fd856e3298a10f6c039210bb570f 100644 (file)
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);
 
-       for (;;) {
+       do {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
 
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
-       }
+       } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
index a2bd698fe2f78f801813c99b097d7576e2f0c869..937e77228466eb22e66ac765d001078a418badfd 100644 (file)
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
 
+       intel_connector->unregister(intel_connector);
+
        /* need to nuke the connector */
        drm_modeset_lock_all(dev);
        if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
                WARN(ret, "Disabling mst crtc failed with %i\n", ret);
        }
-       drm_modeset_unlock_all(dev);
 
-       intel_connector->unregister(intel_connector);
-
-       drm_modeset_lock_all(dev);
        intel_connector_remove_from_fbdev(intel_connector);
        drm_connector_cleanup(connector);
        drm_modeset_unlock_all(dev);
index 6a978ce8024436251009979cf1b1b47fa14ff549..5c6080fd09688aee82c24841fa9e4c114164f384 100644 (file)
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So don't need an immediate wrap
+                        * and only need to effectively wait for the reserved
+                        * size space from the start of ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using qword write, seqno should be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're thrashing one dword of HWS. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
 
index 30a8403a8f4fbaa25e8f55ca3655596bc298768e..cd9fe609aefbc2487ce94ab01e0d495dd12d8167 100644 (file)
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * and as part of the cleanup in the hw state restore we also redisable
         * the vga plane.
         */
-       if (!HAS_PCH_SPLIT(dev)) {
-               drm_modeset_lock_all(dev);
+       if (!HAS_PCH_SPLIT(dev))
                intel_display_resume(dev);
-               drm_modeset_unlock_all(dev);
-       }
 
        dev_priv->modeset_restore = MODESET_DONE;
 
index 347d4df49a9bf37cc751d9bfdedde6701709b2f8..8ed3cf34f82d31bbefa98847d1413f625e2b2f74 100644 (file)
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                             const struct drm_plane_state *pstate,
                             int y)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
        struct drm_framebuffer *fb = pstate->fb;
+       uint32_t width = 0, height = 0;
+
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(pstate->rotation))
+               swap(width, height);
 
        /* for planar format */
        if (fb->pixel_format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
-                       return intel_crtc->config->pipe_src_w *
-                               intel_crtc->config->pipe_src_h *
+                       return width * height *
                                drm_format_plane_cpp(fb->pixel_format, 0);
                else    /* uv-plane data rate */
-                       return (intel_crtc->config->pipe_src_w/2) *
-                               (intel_crtc->config->pipe_src_h/2) *
+                       return (width / 2) * (height / 2) *
                                drm_format_plane_cpp(fb->pixel_format, 1);
        }
 
        /* for packed formats */
-       return intel_crtc->config->pipe_src_w *
-               intel_crtc->config->pipe_src_h *
-               drm_format_plane_cpp(fb->pixel_format, 0);
+       return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                struct drm_framebuffer *fb = plane->state->fb;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (fb == NULL)
+               if (!to_intel_plane_state(plane->state)->visible)
                        continue;
+
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uint16_t plane_blocks, y_plane_blocks = 0;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (pstate->fb == NULL)
+               if (!to_intel_plane_state(pstate)->visible)
                        continue;
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
        struct drm_plane *plane = &intel_plane->base;
        struct drm_framebuffer *fb = plane->state->fb;
+       struct intel_plane_state *intel_pstate =
+                                       to_intel_plane_state(plane->state);
        uint32_t latency = dev_priv->wm.skl_latency[level];
        uint32_t method1, method2;
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t res_blocks, res_lines;
        uint32_t selected_result;
        uint8_t cpp;
+       uint32_t width = 0, height = 0;
 
-       if (latency == 0 || !cstate->base.active || !fb)
+       if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
                return false;
 
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(plane->state->rotation))
+               swap(width, height);
+
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
                                 cpp, latency);
        method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                cstate->pipe_src_w,
-                                cpp, fb->modifier[0],
+                                width,
+                                cpp,
+                                fb->modifier[0],
                                 latency);
 
-       plane_bytes_per_line = cstate->pipe_src_w * cpp;
+       plane_bytes_per_line = width * cpp;
        plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
index 45ce45a5e122046047b2fadc75f5f6f89f34c379..9121646d7c4dd98c4c32ed74343948e662059d06 100644 (file)
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
            IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+       /* This is tied to WaForceContextSaveRestoreNonCoherent */
+       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
                /*
                 *Use Force Non-Coherent whenever executing a 3D context. This
                 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = ringbuf->obj;
+       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+       unsigned flags = PIN_OFFSET_BIAS | 4096;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
                if (ret)
                        return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return -ENOMEM;
                }
        } else {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+                                           flags | PIN_MAPPABLE);
                if (ret)
                        return ret;
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So don't need an immediate wrap
+                        * and only need to effectively wait for the reserved
+                        * size space from the start of ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
index 436d8f2b86823d30bd7f33bc2247d7d905a787ab..68b6f69aa6820d88952173e4aca801ef56370a7c 100644 (file)
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
-               dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+               if (IS_HASWELL(dev))
+                       dev_priv->uncore.funcs.force_wake_put =
+                               fw_domains_put_with_fifo;
+               else
+                       dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
index ae96ebc490fb2b80cbed26bacca113325c52fc82..e81aefe5ffa7c3c278cdda8253cdd228f228369e 100644 (file)
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
                break;
        default:
                if (disp->dithering_mode) {
+                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_mode,
                                                   nv_connector->
                                                   dithering_mode);
-                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                }
                if (disp->dithering_depth) {
+                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_depth,
                                                   nv_connector->
                                                   dithering_depth);
-                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                }
                break;
        }
index c56a886229f196e86340a726e5b64431ba30dac2..b2de290da16feedf3834f31a32879ab315a586b5 100644 (file)
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
        gf100_gr_mmio(gr, gr->func->mmio);
 
+       nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
        memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
        for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
                do {
index 43e5f503d1c5c4dd9662a7b20aec4db5c061817d..030409a3ee4e014ab512b5141d4b4e0ededbfe5d 100644 (file)
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 
        qxl_bo_kunmap(user_bo);
 
+       qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+       qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+       qcrtc->hot_spot_x = hot_x;
+       qcrtc->hot_spot_y = hot_y;
+
        cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_CURSOR_SET;
-       cmd->u.set.position.x = qcrtc->cur_x;
-       cmd->u.set.position.y = qcrtc->cur_y;
+       cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+       cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 
        cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
 
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
 
        cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_CURSOR_MOVE;
-       cmd->u.position.x = qcrtc->cur_x;
-       cmd->u.position.y = qcrtc->cur_y;
+       cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+       cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
index 6e6b9b1519b8edd376fbed8341dbe99374dba8cf..3f3897eb458ce326f39a32d6b324c4361d70747d 100644 (file)
@@ -135,6 +135,8 @@ struct qxl_crtc {
        int index;
        int cur_x;
        int cur_y;
+       int hot_spot_x;
+       int hot_spot_y;
 };
 
 struct qxl_output {
index da310a70c0f01b880f8bac9bf7bb2abe6da5d7a1..827ccc87cbc340bf4132a792f6436005f11111d6 100644 (file)
 #define NI_DP_MSE_SAT2                                 0x7398
 
 #define NI_DP_MSE_SAT_UPDATE                           0x739c
+#       define NI_DP_MSE_SAT_UPDATE_MASK               0x3
+#       define NI_DP_MSE_16_MTP_KEEPOUT                0x100
 
 #define NI_DIG_BE_CNTL                                 0x7140
 #       define NI_DIG_FE_SOURCE_SELECT(x)              (((x) & 0x7f) << 8)
index fd8c4d317e60c12c164b49b4142a6c6e3d7dabf4..95f4fea893021cb8f233244404fed6878742dd3b 100644 (file)
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
        return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-       return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+       /* make sure required functions are enabled */
+       /* dGPU power control is required */
+       if (atpx->functions.power_cntl == false) {
+               printk("ATPX dGPU power cntl not present, forcing\n");
+               atpx->functions.power_cntl = true;
+       }
+
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
index cfcc099c537d0475da7eb6d6ce4576cc7a116b98..81a63d7f5cd9df6e41ef7748d4635a2cddf5b508 100644 (file)
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                   rdev->mode_info.dither_property,
                                                   RADEON_FMT_DITHER_DISABLE);
 
-                       if (radeon_audio != 0)
+                       if (radeon_audio != 0) {
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
+                       }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
index 4fd1a961012d8ff314386765edf72d0662660fe8..d0826fb0434c45f35647c5a41288c17634c3c95f 100644 (file)
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
        "LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
        }
        rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
-       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-               radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-               pdev->subsystem_vendor, pdev->subsystem_device);
+       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+                radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+                pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
         * ignore it */
        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-       if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+       if (rdev->flags & RADEON_IS_PX)
                runtime = true;
        vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
        if (runtime)
index 43cffb526b0c604adbab9722f9e63501f51412db..de504ea29c06d355758e79c6ca18345f42e55f88 100644 (file)
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
        WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
 
        do {
+               unsigned value1, value2;
+               udelay(10);
                temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
-       } while ((temp & 0x1) && retries++ < 10000);
+
+               value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+               value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+               if (!value1 && !value2)
+                       break;
+       } while (retries++ < 50);
 
        if (retries == 10000)
                DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
        return 0;
 }
 
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
 {
        struct drm_device *dev = mst->base.dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
        uint32_t val, temp;
        uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
        int retries = 0;
+       uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+       uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
 
        val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
 
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
 
        do {
                temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+               udelay(10);
        } while ((temp & 0x1) && (retries++ < 10000));
 
        if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
        kfree(radeon_connector);
 }
 
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
-       DRM_DEBUG_KMS("\n");
-       return 0;
-}
-
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
-       .dpms = radeon_connector_dpms,
+       .dpms = drm_helper_connector_dpms,
        .detect = radeon_dp_mst_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        int ret, slots;
-
+       s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
        if (!ASIC_IS_DCE5(rdev)) {
                DRM_ERROR("got mst dpms on non-DCE5\n");
                return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
 
                mst_enc->enc_active = true;
                radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-               radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+               fixed_pbn = drm_int2fixp(mst_enc->pbn);
+               fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+               avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+               radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
 
                atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
                                            mst_enc->fe);
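
The two radeon_dp_mst.c hunks above switch the VCP size programming from an integer slot count to an average-time-slots-per-MTP value held in DRM's 32.32 fixed-point format (drm_int2fixp, drm_fixp_div, drm_fixp2int), which is then split into an integer X part and a fractional Y part for the NI_DP_MSE_RATE register. The userspace sketch below only illustrates that arithmetic; the local fixp helpers are simplified stand-ins for the drm_fixp_* API and the PBN figures are invented.

/* Illustrative sketch of the fixed-point slot calculation used above. */
#include <stdint.h>
#include <stdio.h>

static int64_t int2fixp(int a)                { return (int64_t)a << 32; }
static int64_t fixp_div(int64_t a, int64_t b) { return (a << 16) / (b >> 16); }
static uint32_t fixp2int(int64_t a)           { return (uint32_t)(a >> 32); }

int main(void)
{
        int pbn = 2520, pbn_per_slot = 60;     /* made-up example values */
        int64_t avg = fixp_div(int2fixp(pbn), int2fixp(pbn_per_slot));

        uint32_t x = fixp2int(avg);                               /* whole time slots */
        uint32_t y = (uint32_t)(((avg & 0xffffffff) + 63) >> 6);  /* ceil of fraction in 26 bits */

        printf("avg time slots per MTP = %u + %u/2^26\n", x, y);
        return 0;
}
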
index 7dddfdce85e6be56f5d6bab8787fad82aa120809..90f739478a1b5257b488d120245d77d4cf561a70 100644 (file)
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+       if (radeon_ttm_tt_has_userptr(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
index af4df81c4e0c79c721dca5dc610da235d53651f9..e6abc09b67e3e63e82fea3bbda2b1c91234c525c 100644 (file)
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index bdb8cc89cacc73ea6442c9cf84de4180ded70772..4f9c5c6deaed189f6be5a34145a2ec5cf3fa40b7 100644 (file)
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
index 5c0e43ed5c53ca28e554b7d833b093f545b8b440..c6eaff5f88451f770cf036e71b9167a0202351e7 100644 (file)
 #define USB_DEVICE_ID_SIDEWINDER_GV    0x003b
 #define USB_DEVICE_ID_MS_OFFICE_KB     0x0048
 #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
 #define USB_DEVICE_ID_MS_NE4K          0x00db
 #define USB_DEVICE_ID_MS_NE4K_JP       0x00dc
 #define USB_DEVICE_ID_MS_LK6K          0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB      0x0713
 #define USB_DEVICE_ID_MS_NE7K          0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K      0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600  0x0750
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500    0x076c
 #define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
index 0125e356bd8d29c0ad51b22d604f1df00f0f2cd7..1ac4ff4d57a659fc89c6a2bf36b83ba2d679fdcc 100644 (file)
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
                        unsigned char byte2, unsigned char byte3)
 {
        int ret;
-       unsigned char buf[] = {0x18, byte2, byte3};
+       unsigned char *buf;
+
+       buf = kzalloc(3, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = 0x18;
+       buf[1] = byte2;
+       buf[2] = byte3;
 
        switch (hdev->product) {
        case USB_DEVICE_ID_LENOVO_CUSBKBD:
-               ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+               ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
                                        HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
                break;
        case USB_DEVICE_ID_LENOVO_CBTKBD:
-               ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+               ret = hid_hw_output_report(hdev, buf, 3);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 
+       kfree(buf);
+
        return ret < 0 ? ret : 0; /* BT returns 0, USB returns the number of bytes written */
 }
 
index 75cd3bc59c549a2be01c8092412dafd24abf8c61..e924d555536cf7fb1b221af4c15e51112df107c8 100644 (file)
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_PRESENTER },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
                .driver_data = MS_ERGONOMY | MS_RDESC_3K },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+               .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+               .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+               .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
                .driver_data = MS_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
index 25d3c4330bf69bbca32efb3883628f3af6a28875..c741f5e50a6681cd0b6267fc04c5ca11f1b0a1b5 100644 (file)
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
                                                           MT_TOOL_FINGER,
                                                           false);
                        }
+                       input_mt_sync_frame(input_dev);
                        input_sync(input_dev);
                }
        }
index 4390eee2ce84977427e7c3759f9e481895037858..c830ed39348f7cdba2b06b9dda01616e9405335a 100644 (file)
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
         *   -----+------------------------------+-----+-----+
         * The single bits Yaw, Roll, Pitch in the lower right corner specify
         * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
-        * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
-        * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
-        * and 9 for slow.
+        * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+        * units / deg/s. To get a linear scale for fast rotation we multiply
+        * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+        * previous scale reported by this driver.
+        * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
         * If the wiimote is not rotating the sensor reports 2^13 = 8192.
         * Ext specifies whether an extension is connected to the motionp.
         * which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
        z -= 8192;
 
        if (!(ext[3] & 0x02))
-               x *= 18;
+               x = (x * 2000 * 9) / 440;
        else
                x *= 9;
        if (!(ext[4] & 0x02))
-               y *= 18;
+               y = (y * 2000 * 9) / 440;
        else
                y *= 9;
        if (!(ext[3] & 0x01))
-               z *= 18;
+               z = (z * 2000 * 9) / 440;
        else
                z *= 9;
 
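
The reworked comment above pins down the Motion Plus scaling: at rest the gyros report 8192, slow mode maps 8192 raw units to 440 deg/s and fast mode to 2000 deg/s, so fast samples are multiplied by 2000 * 9 / 440 and slow samples by 9 to land on a common scale of roughly 167.56 units per deg/s. A standalone userspace check of that arithmetic follows; the raw sample values are invented.

/* Illustrative check of the Motion Plus scaling; only the arithmetic mirrors
 * the driver, the raw readings are invented. */
#include <stdio.h>

static int scale(int raw, int fast_mode)
{
        int v = raw - 8192;                      /* 8192 = sensor at rest */

        return fast_mode ? (v * 2000 * 9) / 440  /* fast: 2000 deg/s full scale */
                         : v * 9;                /* slow: 440 deg/s full scale  */
}

int main(void)
{
        /* one degree per second in each mode, on the common output scale */
        printf("slow 1 deg/s -> %d units\n", scale(8192 + 8192 / 440, 0));
        printf("fast 1 deg/s -> %d units\n", scale(8192 + 8192 / 2000, 1));
        return 0;
}
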
index ad71160b9ea4341309030db0aea3aa2213ad71d3..ae83af649a607f67239f1a64bf45dd4b5770cc7d 100644 (file)
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
        return ret;
 }
 
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
-       if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
-               usbhid_restart_out_queue(usbhid);
-       if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
-               usbhid_restart_ctrl_queue(usbhid);
-}
-
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
        usb_kill_urb(usbhid->urbout);
 }
 
+static void hid_restart_io(struct hid_device *hid)
+{
+       struct usbhid_device *usbhid = hid->driver_data;
+       int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+       int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+       spin_lock_irq(&usbhid->lock);
+       clear_bit(HID_SUSPENDED, &usbhid->iofl);
+       usbhid_mark_busy(usbhid);
+
+       if (clear_halt || reset_pending)
+               schedule_work(&usbhid->reset_work);
+       usbhid->retry_delay = 0;
+       spin_unlock_irq(&usbhid->lock);
+
+       if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+               return;
+
+       if (!clear_halt) {
+               if (hid_start_in(hid) < 0)
+                       hid_io_error(hid);
+       }
+
+       spin_lock_irq(&usbhid->lock);
+       if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+               usbhid_restart_out_queue(usbhid);
+       if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+               usbhid_restart_ctrl_queue(usbhid);
+       spin_unlock_irq(&usbhid->lock);
+}
+
 /* Treat USB reset pretty much the same as suspend/resume */
 static int hid_pre_reset(struct usb_interface *intf)
 {
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
                return 1;
        }
 
+       /* No need to do another reset or clear a halted endpoint */
        spin_lock_irq(&usbhid->lock);
        clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+       clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
        hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
-       status = hid_start_in(hid);
-       if (status < 0)
-               hid_io_error(hid);
-       usbhid_restart_queues(usbhid);
+
+       hid_restart_io(hid);
 
        return 0;
 }
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
 #ifdef CONFIG_PM
 static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
 {
-       struct usbhid_device *usbhid = hid->driver_data;
-       int status;
-
-       spin_lock_irq(&usbhid->lock);
-       clear_bit(HID_SUSPENDED, &usbhid->iofl);
-       usbhid_mark_busy(usbhid);
-
-       if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
-                       test_bit(HID_RESET_PENDING, &usbhid->iofl))
-               schedule_work(&usbhid->reset_work);
-       usbhid->retry_delay = 0;
-
-       usbhid_restart_queues(usbhid);
-       spin_unlock_irq(&usbhid->lock);
-
-       status = hid_start_in(hid);
-       if (status < 0)
-               hid_io_error(hid);
+       int status = 0;
 
+       hid_restart_io(hid);
        if (driver_suspended && hid->driver && hid->driver->resume)
                status = hid->driver->resume(hid);
        return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 static int hid_resume(struct usb_interface *intf)
 {
        struct hid_device *hid = usb_get_intfdata (intf);
-       struct usbhid_device *usbhid = hid->driver_data;
        int status;
 
-       if (!test_bit(HID_STARTED, &usbhid->iofl))
-               return 0;
-
        status = hid_resume_common(hid, true);
        dev_dbg(&intf->dev, "resume status %d\n", status);
        return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
 static int hid_reset_resume(struct usb_interface *intf)
 {
        struct hid_device *hid = usb_get_intfdata(intf);
-       struct usbhid_device *usbhid = hid->driver_data;
        int status;
 
-       clear_bit(HID_SUSPENDED, &usbhid->iofl);
        status = hid_post_reset(intf);
        if (status >= 0 && hid->driver && hid->driver->reset_resume) {
                int ret = hid->driver->reset_resume(hid);
index 68a560957871c5a497e4e3d0229ca03dc359d8da..ccf1883318c37ff401c08a147d365ab2fbb0d5df 100644 (file)
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
                hid_data->inputmode = field->report->id;
                hid_data->inputmode_index = usage->usage_index;
                break;
+
+       case HID_UP_DIGITIZER:
+               if (field->report->id == 0x0B &&
+                   (field->application == WACOM_G9_DIGITIZER ||
+                    field->application == WACOM_G11_DIGITIZER)) {
+                       wacom->wacom_wac.mode_report = field->report->id;
+                       wacom->wacom_wac.mode_value = 0;
+               }
+               break;
+
+       case WACOM_G9_PAGE:
+       case WACOM_G11_PAGE:
+               if (field->report->id == 0x03 &&
+                   (field->application == WACOM_G9_TOUCHSCREEN ||
+                    field->application == WACOM_G11_TOUCHSCREEN)) {
+                       wacom->wacom_wac.mode_report = field->report->id;
+                       wacom->wacom_wac.mode_value = 0;
+               }
+               break;
        }
 }
 
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
        return 0;
 }
 
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
-               int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+                                struct wacom_wac *wacom_wac)
 {
-       unsigned char *rep_data;
+       u8 *rep_data;
+       struct hid_report *r;
+       struct hid_report_enum *re;
+       int length;
        int error = -ENOMEM, limit = 0;
 
-       rep_data = kzalloc(length, GFP_KERNEL);
+       if (wacom_wac->mode_report < 0)
+               return 0;
+
+       re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+       r = re->report_id_hash[wacom_wac->mode_report];
+       if (!r)
+               return -EINVAL;
+
+       rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
        if (!rep_data)
-               return error;
+               return -ENOMEM;
+
+       length = hid_report_len(r);
 
        do {
-               rep_data[0] = report_id;
-               rep_data[1] = mode;
+               rep_data[0] = wacom_wac->mode_report;
+               rep_data[1] = wacom_wac->mode_value;
 
                error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
                                         length, 1);
                if (error >= 0)
                        error = wacom_get_report(hdev, HID_FEATURE_REPORT,
                                                 rep_data, length, 1);
-       } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+       } while (error >= 0 &&
+                rep_data[1] != wacom_wac->mode_value &&
+                limit++ < WAC_MSG_RETRIES);
 
        kfree(rep_data);
 
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
 static int wacom_query_tablet_data(struct hid_device *hdev,
                struct wacom_features *features)
 {
+       struct wacom *wacom = hid_get_drvdata(hdev);
+       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
        if (hdev->bus == BUS_BLUETOOTH)
                return wacom_bt_query_tablet_data(hdev, 1, features);
 
-       if (features->type == HID_GENERIC)
-               return wacom_hid_set_device_mode(hdev);
-
-       if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
-               if (features->type > TABLETPC) {
-                       /* MT Tablet PC touch */
-                       return wacom_set_device_mode(hdev, 3, 4, 4);
-               }
-               else if (features->type == WACOM_24HDT) {
-                       return wacom_set_device_mode(hdev, 18, 3, 2);
-               }
-               else if (features->type == WACOM_27QHDT) {
-                       return wacom_set_device_mode(hdev, 131, 3, 2);
-               }
-               else if (features->type == BAMBOO_PAD) {
-                       return wacom_set_device_mode(hdev, 2, 2, 2);
-               }
-       } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
-               if (features->type <= BAMBOO_PT) {
-                       return wacom_set_device_mode(hdev, 2, 2, 2);
+       if (features->type != HID_GENERIC) {
+               if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+                       if (features->type > TABLETPC) {
+                               /* MT Tablet PC touch */
+                               wacom_wac->mode_report = 3;
+                               wacom_wac->mode_value = 4;
+                       } else if (features->type == WACOM_24HDT) {
+                               wacom_wac->mode_report = 18;
+                               wacom_wac->mode_value = 2;
+                       } else if (features->type == WACOM_27QHDT) {
+                               wacom_wac->mode_report = 131;
+                               wacom_wac->mode_value = 2;
+                       } else if (features->type == BAMBOO_PAD) {
+                               wacom_wac->mode_report = 2;
+                               wacom_wac->mode_value = 2;
+                       }
+               } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+                       if (features->type <= BAMBOO_PT) {
+                               wacom_wac->mode_report = 2;
+                               wacom_wac->mode_value = 2;
+                       }
                }
        }
 
+       wacom_set_device_mode(hdev, wacom_wac);
+
+       if (features->type == HID_GENERIC)
+               return wacom_hid_set_device_mode(hdev);
+
        return 0;
 }
 
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
                goto fail_type;
        }
 
+       wacom_wac->hid_data.inputmode = -1;
+       wacom_wac->mode_report = -1;
+
        wacom->usbdev = dev;
        wacom->intf = intf;
        mutex_init(&wacom->lock);
index bd198bbd4df0841b5a3596c09ecac822ecda40e5..02c4efea241c02eb6270d19b40cc90256b04721a 100644 (file)
@@ -2425,6 +2425,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                }
        }
 
+       /*
+        * Hack for the Bamboo One:
+        * the device presents a PAD/Touch interface as most Bamboos and even
+        * sends ghost PAD data on it. However, later, we must disable this
+        * ghost interface, and we cannot detect it unless we set it here
+        * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+        */
+       if (features->type == BAMBOO_PEN &&
+           features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+               features->device_type |= WACOM_DEVICETYPE_PAD;
+
        /*
         * Raw Wacom-mode pen and touch events both come from interface
         * 0, whose HID descriptor has an application usage of 0xFF0D
index 25baa7f295997f65759d8da1c6dc239c01258156..e2084d914c14aa3902b42e2cb852517c31327256 100644 (file)
 #define WACOM_DEVICETYPE_WL_MONITOR     0x0008
 
 #define WACOM_VENDORDEFINED_PEN                0xff0d0001
+#define WACOM_G9_PAGE                  0xff090000
+#define WACOM_G9_DIGITIZER             (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN           (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE                 0xff110000
+#define WACOM_G11_DIGITIZER            (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN          (WACOM_G11_PAGE | 0x11)
 
 #define WACOM_PEN_FIELD(f)     (((f)->logical == HID_DG_STYLUS) || \
                                 ((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
        int ps_connected;
        u8 bt_features;
        u8 bt_high_speed;
+       int mode_report;
+       int mode_value;
        struct hid_data hid_data;
 };
 
index 374c129219ef0c48c8ff3004662c413d78ad2b4c..5efadad4615bf6d68c215897b15adbf9eab80b53 100644 (file)
@@ -92,6 +92,7 @@ struct iommu_dev_data {
        struct list_head dev_data_list;   /* For global dev_data_list */
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
+       u16 alias;                        /* Alias Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
        bool passthrough;                 /* Device is identity mapped */
        struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
        return container_of(dom, struct protection_domain, domain);
 }
 
+static inline u16 get_device_id(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
        return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       *(u16 *)data = alias;
+       return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 devid, ivrs_alias, pci_alias;
+
+       devid = get_device_id(dev);
+       ivrs_alias = amd_iommu_alias_table[devid];
+       pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+       if (ivrs_alias == pci_alias)
+               return ivrs_alias;
+
+       /*
+        * DMA alias showdown
+        *
+        * The IVRS is fairly reliable in telling us about aliases, but it
+        * can't know about every screwy device.  If we don't have an IVRS
+        * reported alias, use the PCI reported alias.  In that case we may
+        * still need to initialize the rlookup and dev_table entries if the
+        * alias is to a non-existent device.
+        */
+       if (ivrs_alias == devid) {
+               if (!amd_iommu_rlookup_table[pci_alias]) {
+                       amd_iommu_rlookup_table[pci_alias] =
+                               amd_iommu_rlookup_table[devid];
+                       memcpy(amd_iommu_dev_table[pci_alias].data,
+                              amd_iommu_dev_table[devid].data,
+                              sizeof(amd_iommu_dev_table[pci_alias].data));
+               }
+
+               return pci_alias;
+       }
+
+       pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+               "for device %s[%04x:%04x], kernel reported alias "
+               "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+               PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+               PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+               PCI_FUNC(pci_alias));
+
+       /*
+        * If we don't have a PCI DMA alias and the IVRS alias is on the same
+        * bus, then the IVRS table may know about a quirk that we don't.
+        */
+       if (pci_alias == devid &&
+           PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+               pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+               pdev->dma_alias_devfn = ivrs_alias & 0xff;
+               pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+                       PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+                       dev_name(dev));
+       }
+
+       return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
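
The "DMA alias showdown" comment above amounts to a short priority order between the IVRS-reported alias and the PCI-reported alias. The userspace sketch below restates just that choice; the device IDs are invented and the side effects in get_alias() (rlookup/dev_table fix-ups and the DMA-alias quirk) are deliberately omitted.

/* Sketch of the alias choice made in get_alias() above; returned value only. */
#include <stdint.h>
#include <stdio.h>

static uint16_t resolve_alias(uint16_t devid, uint16_t ivrs_alias, uint16_t pci_alias)
{
        if (ivrs_alias == pci_alias)     /* both sources agree             */
                return ivrs_alias;
        if (ivrs_alias == devid)         /* IVRS has no opinion: trust PCI */
                return pci_alias;
        return ivrs_alias;               /* otherwise the IVRS quirk wins  */
}

int main(void)
{
        printf("%#x\n", resolve_alias(0x0010, 0x0010, 0x0008)); /* -> 0x8 (PCI)  */
        printf("%#x\n", resolve_alias(0x0010, 0x0008, 0x0010)); /* -> 0x8 (IVRS) */
        return 0;
}
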
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
        return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
        return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
        if (!dev_data)
                return -ENOMEM;
 
+       dev_data->alias = get_alias(dev);
+
        if (pci_iommuv2_capable(pdev)) {
                struct amd_iommu *iommu;
 
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
        u16 devid, alias;
 
        devid = get_device_id(dev);
-       alias = amd_iommu_alias_table[devid];
+       alias = get_alias(dev);
 
        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
        int ret;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        bool ats;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
        ats   = dev_data->ats.enabled;
 
        /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
                return;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        /* decrease reference counters */
        dev_data->domain->dev_iommu[iommu->index] -= 1;
index 2409e3bd3df21e973db1231360b7a50fba66efa1..7c39ac4b9c537df09128a0c106d3bfbd009f7e7f 100644 (file)
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (smmu_domain->smmu)
                goto out_unlock;
 
+       /* We're bypassing these SIDs, so don't allocate an actual context */
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               smmu_domain->smmu = smmu;
+               goto out_unlock;
+       }
+
        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        void __iomem *cb_base;
        int irq;
 
-       if (!smmu)
+       if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
                return;
 
        /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-       /* Devices in an IOMMU group may already be configured */
-       ret = arm_smmu_master_configure_smrs(smmu, cfg);
-       if (ret)
-               return ret == -EEXIST ? 0 : ret;
-
        /*
         * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-        * for all devices behind the SMMU.
+        * for all devices behind the SMMU. Note that we need to take
+        * care configuring SMRs for devices that are both a platform_device
+        * and a PCI device (i.e. a PCI host controller).
         */
        if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
                return 0;
 
+       /* Devices in an IOMMU group may already be configured */
+       ret = arm_smmu_master_configure_smrs(smmu, cfg);
+       if (ret)
+               return ret == -EEXIST ? 0 : ret;
+
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;
 
index 0d29b5a6356d729a6ceb987bccc8c2dbe6779823..99e5f9751e8b1746835b28c4bd4e2a5d1b53fd14 100644 (file)
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_mISDN))
+               return -EINVAL;
+
        lock_sock(sk);
 
        if (_pms(sk)->dev) {
index eb934b0242e0e17652b7fc17c3255b5e70e40d15..67392b6ab845fbdec7d54798b591c24e544347a9 100644 (file)
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
  * Actually now I think of it, it's possible that Ron *is* half the Plan 9
  * userbase.  Oh well.
  */
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
 {
        /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
        return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
  *
  * This routine indicates if a particular trap number could be delivered
  * directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective.  See Mastery for
+ * how we could do this anyway...
  */
 static bool direct_trap(unsigned int num)
 {
index ac8ad0461e809db526bfc1db1ca2290ac612a43d..69b3814afd2f63c7fabe82dde698fae44b2d1862 100644 (file)
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
 bool send_notify_to_eventfd(struct lg_cpu *cpu);
 void init_clockdev(struct lg_cpu *cpu);
 bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
 int init_interrupts(void);
 void free_interrupts(void);
 
index 6a4cd771a2be62b4172cc26a178ca85fbf7e6d27..adc162c7040d7ef0a2f8f738e7a21bab1d57ba09 100644 (file)
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
                        return;
                break;
        case 32 ... 255:
+               /* This might be a syscall. */
+               if (could_be_syscall(cpu->regs->trapnum))
+                       break;
+
                /*
-                * These values mean a real interrupt occurred, in which case
+                * Other values mean a real interrupt occurred, in which case
                 * the Host handler has already been run. We just do a
                 * friendly check if another process should now be run, then
                 * return to run the Guest again.
index dc11bbf27274380ecf60b55491193363d947e5c0..58d04726cdd7dc13f0faad15aeec89214ffce620 100644 (file)
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
                                       size_t count, loff_t *ppos)
 {
        struct mbox_test_device *tdev = filp->private_data;
-       int ret;
 
        if (!tdev->tx_channel) {
                dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
                return -EINVAL;
        }
 
-       tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
-       if (!tdev->signal)
-               return -ENOMEM;
+       /* Only allocate memory if we need to */
+       if (!tdev->signal) {
+               tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+               if (!tdev->signal)
+                       return -ENOMEM;
+       }
 
-       ret = copy_from_user(tdev->signal, userbuf, count);
-       if (ret) {
+       if (copy_from_user(tdev->signal, userbuf, count)) {
                kfree(tdev->signal);
+               tdev->signal = NULL;
                return -EFAULT;
        }
 
-       return ret < 0 ? ret : count;
+       return count;
 }
 
 static const struct file_operations mbox_test_signal_ops = {
index bd07f39f06926dad75c7dfc4759c449b86af3ce2..dd2afbca51c9b8d2c155003548d31da28f81c3ce 100644 (file)
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
        int i;
 
        ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
+       if (!ctx)
+               return -ENOMEM;
 
        platform_set_drvdata(pdev, ctx);
 
index 6a4811f857056aaa64baff75265902b59be12ac9..4a36632c236f09ad9cdde06ff213961c712c3000 100644 (file)
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
 
        if (!np) {
                dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
-               return ERR_PTR(-ENOSYS);
+               return ERR_PTR(-EINVAL);
        }
 
        if (!of_get_property(np, "mbox-names", NULL)) {
                dev_err(cl->dev,
                        "%s() requires an \"mbox-names\" property\n", __func__);
-               return ERR_PTR(-ENOSYS);
+               return ERR_PTR(-EINVAL);
        }
 
        of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
index 27f2ef300f8bb10bac594b094e3fafcc8db407d0..3970cda10080988887bfa07329241d94afd1b883 100644 (file)
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
        return 0;
 }
 
-#define WRITE_LOCK(cmd)        \
-       down_write(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_write(&cmd->root_lock); \
-               return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+       down_write(&cmd->root_lock);
+       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+               up_write(&cmd->root_lock);
+               return false;
        }
+       return true;
+}
 
-#define WRITE_LOCK_VOID(cmd) \
-       down_write(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_write(&cmd->root_lock); \
-               return; \
-       }
+#define WRITE_LOCK(cmd)                                \
+       do {                                    \
+               if (!cmd_write_lock((cmd)))     \
+                       return -EINVAL;         \
+       } while(0)
+
+#define WRITE_LOCK_VOID(cmd)                   \
+       do {                                    \
+               if (!cmd_write_lock((cmd)))     \
+                       return;                 \
+       } while(0)
 
 #define WRITE_UNLOCK(cmd) \
-       up_write(&cmd->root_lock)
+       up_write(&(cmd)->root_lock)
 
-#define READ_LOCK(cmd) \
-       down_read(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_read(&cmd->root_lock); \
-               return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+       down_read(&cmd->root_lock);
+       if (cmd->fail_io) {
+               up_read(&cmd->root_lock);
+               return false;
        }
+       return true;
+}
 
-#define READ_LOCK_VOID(cmd)    \
-       down_read(&cmd->root_lock); \
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-               up_read(&cmd->root_lock); \
-               return; \
-       }
+#define READ_LOCK(cmd)                         \
+       do {                                    \
+               if (!cmd_read_lock((cmd)))      \
+                       return -EINVAL;         \
+       } while(0)
+
+#define READ_LOCK_VOID(cmd)                    \
+       do {                                    \
+               if (!cmd_read_lock((cmd)))      \
+                       return;                 \
+       } while(0)
 
 #define READ_UNLOCK(cmd) \
-       up_read(&cmd->root_lock)
+       up_read(&(cmd)->root_lock)
 
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
 {
index be4905769a45637a4882af087544578cdb90efa4..3d3ac13287a4570847ae179a483fe13586e96f66 100644 (file)
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
                tio = alloc_tio(ci, ti, target_bio_nr);
                tio->len_ptr = len;
                r = clone_bio(tio, bio, sector, *len);
-               if (r < 0)
+               if (r < 0) {
+                       free_tio(ci->md, tio);
                        break;
+               }
                __map_bio(tio);
        }
 
index 5f1a36b8fbb082af1d0251504e19a33e1dc8c621..0a5cbbe12452da62127af8ced48101bb9fc212b8 100644 (file)
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
                        break;
 
                val = kmalloc(len, GFP_KERNEL);
-               if (!val)
+               if (!val) {
+                       kfree(base);
                        break;
+               }
 
                *val = 0x12345678;
                base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
        }
        case CT_READ_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);
-               int saw, *val = kmalloc(1024, GFP_KERNEL);
+               int saw, *val;
                int *base;
 
                if (!p)
                        break;
 
-               if (!val)
+               val = kmalloc(1024, GFP_KERNEL);
+               if (!val) {
+                       free_page(p);
                        break;
+               }
 
                base = (int *)p;
 
index 3bdbe50a363f3250958887170b7162f3de4711f7..8a0147dfed27d864d419de730499960c54bc202c 100644 (file)
@@ -86,7 +86,6 @@ static int max_devices;
 
 /* TODO: Replace these with struct ida */
 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
-       unsigned int    name_idx;
        unsigned int    reset_done;
 #define MMC_BLK_READ           BIT(0)
 #define MMC_BLK_WRITE          BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                goto out;
        }
 
-       /*
-        * !subname implies we are creating main mmc_blk_data that will be
-        * associated with mmc_card with dev_set_drvdata. Due to device
-        * partitions, devidx will not coincide with a per-physical card
-        * index anymore so we keep track of a name index.
-        */
-       if (!subname) {
-               md->name_idx = find_first_zero_bit(name_use, max_devices);
-               __set_bit(md->name_idx, name_use);
-       } else
-               md->name_idx = ((struct mmc_blk_data *)
-                               dev_to_disk(parent)->private_data)->name_idx;
-
        md->area_type = area_type;
 
        /*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
         */
 
        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-                "mmcblk%u%s", md->name_idx, subname ? subname : "");
+                "mmcblk%u%s", card->host->index, subname ? subname : "");
 
        if (mmc_card_mmc(card))
                blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;
 
-       __clear_bit(md->name_idx, name_use);
        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
index f8c4762bb48dbff5a2a826e57352f1dfc12d05bc..bcc0de47fe7e1833254c1e8fcb95eed112c42031 100644 (file)
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
        .pdata = &sdhci_tegra114_pdata,
 };
 
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
-       .pdata = &sdhci_tegra114_pdata,
-       .nvquirks = NVQUIRK_ENABLE_SDR50 |
-                   NVQUIRK_ENABLE_DDR50 |
-                   NVQUIRK_ENABLE_SDR104 |
-                   NVQUIRK_HAS_PADCALIB,
-};
-
 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
        .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
                  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
 
 static const struct of_device_id sdhci_tegra_dt_match[] = {
        { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
-       { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+       { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
        { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
        { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
        { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
index b6facac54fc0f18c30dc7adced01223d1f076c9a..557b8462f55ed6f46961d2201affedd2c2746401 100644 (file)
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
  * This is the first phase of the normal nand_scan() function. It reads the
  * flash ID and sets up MTD fields accordingly.
  *
- * The mtd->owner field must be set to the module of the caller.
  */
 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
                    struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
  *
  * This fills out all the uninitialized function pointers with the defaults.
  * The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
  */
 int nand_scan(struct mtd_info *mtd, int maxchips)
 {
        int ret;
 
-       /* Many callers got this wrong, so check for it for a while... */
-       if (!mtd->owner && caller_is_module()) {
-               pr_crit("%s called with NULL mtd->owner!\n", __func__);
-               BUG();
-       }
-
        ret = nand_scan_ident(mtd, maxchips, NULL);
        if (!ret)
                ret = nand_scan_tail(mtd);
index 2a1ba62b7da20ff921ec36005a8b4d40ec17756b..a24c18eee5987b07287e9227c298f39ae5e97100 100644 (file)
@@ -195,6 +195,7 @@ config GENEVE
 
 config MACSEC
        tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+       select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
        ---help---
index 50454be86570d61c40863ca8cad899c2034a316b..a2904029cccc2949b90d66cc8fb7d3fcbc11f103 100644 (file)
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                               struct net_device *bridge)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u16 fid;
        int i, err;
 
        mutex_lock(&ps->smi_mutex);
 
-       /* Get or create the bridge FID and assign it to the port */
-       for (i = 0; i < ps->num_ports; ++i)
-               if (ps->ports[i].bridge_dev == bridge)
-                       break;
-
-       if (i < ps->num_ports)
-               err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-       else
-               err = _mv88e6xxx_fid_new(ds, &fid);
-       if (err)
-               goto unlock;
-
-       err = _mv88e6xxx_port_fid_set(ds, port, fid);
-       if (err)
-               goto unlock;
-
        /* Assign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = bridge;
 
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                }
        }
 
-unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct net_device *bridge = ps->ports[port].bridge_dev;
-       u16 fid;
        int i;
 
        mutex_lock(&ps->smi_mutex);
 
-       /* Give the port a fresh Filtering Information Database */
-       if (_mv88e6xxx_fid_new(ds, &fid) ||
-           _mv88e6xxx_port_fid_set(ds, port, fid))
-               netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
        /* Unassign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = NULL;
 
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * the other bits clear.
         */
        reg = 1 << port;
-       /* Disable learning for DSA and CPU ports */
-       if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-               reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+       /* Disable learning for CPU port */
+       if (dsa_is_cpu_port(ds, port))
+               reg = 0;
 
        ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
        if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (ret)
                goto abort;
 
-       /* Port based VLAN map: give each port its own address
+       /* Port based VLAN map: give each port the same default address
         * database, and allow bidirectional communication between the
         * CPU and DSA port(s), and the other ports.
         */
-       ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+       ret = _mv88e6xxx_port_fid_set(ds, port, 0);
        if (ret)
                goto abort;
 
index 8f76f4558a88c15b0a14bb6ec3d9fb769fea9d0a..2ff465848b6553ecc0a646421e30cded0e8fb053 100644 (file)
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = -EIO;
 
-       netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
        /* Init PHY as early as possible due to power saving issue  */
index 99b30a952b38736a825efc5149aa9027f6fae64d..38db2e4d7d540d27748ca05748c412b4a4d0215b 100644 (file)
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
                dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
        }
 
+       /* This (reset &) enable is not present in specs or reference driver but
+        * Broadcom does it in arch PCI code when enabling fake PCI device.
+        */
+       bcma_core_enable(core, 0);
+
        /* Allocation and references */
        net_dev = alloc_etherdev(sizeof(*bgmac));
        if (!net_dev)
index 4fbb093e0d844031dd86cc3d7253eb44d10686d6..9a03c142b742502051b7e98a0a30c600684d8f80 100644 (file)
 #define  BGMAC_CMDCFG_TAI                      0x00000200
 #define  BGMAC_CMDCFG_HD                       0x00000400      /* Set if in half duplex mode */
 #define  BGMAC_CMDCFG_HD_SHIFT                 10
-#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for other revs */
-#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, only for core rev 4 */
-#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for core rev 0-3 */
+#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, for core rev >= 4 */
+#define  BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define  BGMAC_CMDCFG_ML                       0x00008000      /* Set to activate mac loopback mode */
 #define  BGMAC_CMDCFG_AE                       0x00400000
 #define  BGMAC_CMDCFG_CFE                      0x00800000
index cf6445d148ca5098c5ed4a7ab443022fe212ae27..44ad1490b4726175ea8f5697b3a759e5cc7befcb 100644 (file)
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               if (sizeof(unsigned long) != sizeof(u32) &&
+                   s->stat_sizeof == sizeof(unsigned long))
+                       data[i] = *(unsigned long *)p;
+               else
+                       data[i] = *(u32 *)p;
        }
 }
 
index 967951582e033d8a34a3463d5047f15bb7c26219..d20539a6d162bd5f0dbbd9c79d89e6784e37b969 100644 (file)
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
                }
 
                lmac++;
-               if (lmac == MAX_LMAC_PER_BGX)
+               if (lmac == MAX_LMAC_PER_BGX) {
+                       of_node_put(node);
                        break;
+               }
        }
-       of_node_put(node);
        return 0;
 
 defer:
index 984a3cc26f86ddb7a194b4738a3dd7e018b0bc33..326d4009525e1abf5cae7674c29b65a7d76af66a 100644 (file)
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
index 13b144bcf725ec29d42dbdc1b33fc0c39c1fe919..6278e5a74b74d3f538f60e327e635d9ba9b9c0cd 100644 (file)
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
        int i;
-       struct sge_eth_rxq *eq = adap->sge.ethrxq;
-       struct sge_eth_txq *etq = adap->sge.ethtxq;
+       struct sge_eth_rxq *eq;
+       struct sge_eth_txq *etq;
+
+       /* stop all Rx queues in order to start them draining */
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
+               if (eq->rspq.desc)
+                       t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+                                  FW_IQ_TYPE_FL_INT_CAP,
+                                  eq->rspq.cntxt_id,
+                                  eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+                                  0xffff);
+       }
 
        /* clean up Ethernet Tx/Rx queues */
-       for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq,
                                     eq->fl.size ? &eq->fl : NULL);
+
+               etq = &adap->sge.ethtxq[i];
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
                                       etq->q.cntxt_id);
index cc1736bece0faabe22f04a828f7d1740bf9462de..71586a3e0f613a83118281e5eb80f369d4c567c9 100644 (file)
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
+       /* We have two VPD data structures stored in the adapter VPD area.
+        * By default, Linux calculates the size of the VPD area by traversing
+        * the first VPD area at offset 0x0, so we need to tell the OS what
+        * our real VPD size is.
+        */
+       ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+       if (ret < 0)
+               goto out;
+
        /* Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
@@ -6939,6 +6949,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_iq_stop - stop an ingress queue and its FLs
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @pf: the PF owning the queues
+ *     @vf: the VF owning the queues
+ *     @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *     @iqid: ingress queue id
+ *     @fl0id: FL0 queue id or 0xffff if no attached FL0
+ *     @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *     Stops an ingress queue and its associated FLs, if any.  This causes
+ *     any current or future data/messages destined for these queues to be
+ *     tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id)
+{
+       struct fw_iq_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+                                 FW_IQ_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+       c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+       c.iqid = cpu_to_be16(iqid);
+       c.fl0id = cpu_to_be16(fl0id);
+       c.fl1id = cpu_to_be16(fl1id);
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_iq_free - free an ingress queue and its FLs
  *     @adap: the adapter
index 62ccebc5f7288ceb79262b7c01f6162fd675ac20..8cf943db56627db406b7a5c76b701d72fbc540f5 100644 (file)
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                if (err)
                        return err;
 
-               /* verify upper 16 bits are zero */
-               if (vid >> 16)
-                       return FM10K_ERR_PARAM;
-
                set = !(vid & FM10K_VLAN_CLEAR);
                vid &= ~FM10K_VLAN_CLEAR;
 
-               err = fm10k_iov_select_vid(vf_info, (u16)vid);
-               if (err < 0)
-                       return err;
+               /* if the length field has been set, this is a multi-bit
+                * update request. For multi-bit requests, simply disallow
+                * them when the pf_vid has been set. In this case, the PF
+                * should have already cleared the VLAN_TABLE, and if we
+                * allowed them, it could allow a rogue VF to receive traffic
+                * on a VLAN it was not assigned. In the single-bit case, we
+                * need to modify requests for VLAN 0 to use the default PF or
+                * SW vid when assigned.
+                */
 
-               vid = err;
+               if (vid >> 16) {
+                       /* prevent multi-bit requests when PF has
+                        * administratively set the VLAN for this VF
+                        */
+                       if (vf_info->pf_vid)
+                               return FM10K_ERR_PARAM;
+               } else {
+                       err = fm10k_iov_select_vid(vf_info, (u16)vid);
+                       if (err < 0)
+                               return err;
+
+                       vid = err;
+               }
 
                /* update VSI info for VF in regards to VLAN table */
                err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
index 084d0ab316b796b5d5af2ab87b7a5bee193b5a84..6a49b7ae511c1d0440326ba2cd43d477d6738a7c 100644 (file)
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
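A compile-and-run sketch of the sliding-window test above, with hypothetical names and the value 8 assumed for I40E_MAX_BUFFER_TXD: the TSO header and the first descriptor's payload already consume 2 descriptors, so every window of 6 consecutive fragments has to make enough progress toward gso_size or the skb must be linearized.

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD	8	/* assumed value of I40E_MAX_BUFFER_TXD */

static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
	int sum, windows, next, stale = 0;

	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;			/* 6 frags or fewer always fit */

	/* number of 6-fragment windows to validate; the last 6 frags
	 * cannot inherit data from later descriptors, so they are skipped */
	windows = nr_frags - (MAX_BUFFER_TXD - 2);

	/* seed with fragments 0..4; starting at 1 - gso_size means a
	 * window summing to at least gso_size - 1 keeps sum non-negative */
	sum = 1 - gso_size;
	for (next = 0; next < 5; next++)
		sum += frag_size[next];

	for (;;) {
		sum += frag_size[next++];	/* complete the 6-frag window */
		if (sum < 0)
			return true;		/* insufficient progress: linearize */
		if (!--windows)
			break;
		sum -= frag_size[stale++];	/* slide the window forward */
	}
	return false;
}

int main(void)
{
	int small[8] = { 100, 100, 100, 100, 100, 100, 100, 100 };
	int large[8] = { 4096, 4096, 4096, 4096, 4096, 4096, 4096, 4096 };

	/* prints "1 0": tiny fragments need linearizing, large ones do not */
	printf("%d %d\n", needs_linearize(small, 8, 1400),
	       needs_linearize(large, 8, 1400));
	return 0;
}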
index cdd5dc00aec519972dd3af6166666a0d3c65766f..a9bd70537d6547b550a591b7775a8fcf4c76f186 100644 (file)
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40e_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40e_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
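Building on the helper from the previous sketch, the dispatch in the inline wrapper above can be modeled as follows (again with hypothetical names): fewer than 8 buffers always fit, GSO packets go through the sliding-window check, and non-GSO packets may still use exactly 8 buffers but no more.

/* reuses needs_linearize() and MAX_BUFFER_TXD from the previous sketch */
static bool chk_linearize(int count, bool is_gso, const int *frag_size,
			  int nr_frags, int gso_size)
{
	if (count < MAX_BUFFER_TXD)	/* fast path: fits for TSO and non-TSO */
		return false;

	if (is_gso)			/* GSO: run the sliding-window check */
		return needs_linearize(frag_size, nr_frags, gso_size);

	/* non-GSO sends may still use exactly 8 buffers */
	return count != MAX_BUFFER_TXD;
}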
index ebcc25c05796d7d42372395ba9f8d3bf838d547a..cea97daa844c9a362417d253d10273b364b0c7a9 100644 (file)
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
index c1dd8c5c966697bc79ae47babbab8a598d20ef18..0429553fe8870cb7f38ae07ab13bb0c94d07dbe0 100644 (file)
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40evf_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40evf_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
index f69584a9b47fbef008a1ec138feba9f10504d997..c761194bb32352be84c2610464a9778d1a365c34 100644 (file)
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return bitmap_iterator_count(&it) +
                        (priv->tx_ring_num * 2) +
-                       (priv->rx_ring_num * 2);
+                       (priv->rx_ring_num * 3);
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
        for (i = 0; i < priv->rx_ring_num; i++) {
                data[index++] = priv->rx_ring[i]->packets;
                data[index++] = priv->rx_ring[i]->bytes;
+               data[index++] = priv->rx_ring[i]->dropped;
        }
        spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
                                "rx%d_packets", i);
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "rx%d_bytes", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_dropped", i);
                }
                break;
        case ETH_SS_PRIV_FLAGS:
index 3904b5fc0b7c904548763ffbb2d31fbcccfe5b6c..20b6c2e678b8879525ae41c995d5bc7692bfdec5 100644 (file)
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        u64 in_mod = reset << 8 | port;
        int err;
        int i, counter_index;
+       unsigned long sw_rx_dropped = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        for (i = 0; i < priv->rx_ring_num; i++) {
                stats->rx_packets += priv->rx_ring[i]->packets;
                stats->rx_bytes += priv->rx_ring[i]->bytes;
+               sw_rx_dropped += priv->rx_ring[i]->dropped;
                priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
                priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
                priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                                          &mlx4_en_stats->MCAST_prio_1,
                                          NUM_PRIORITIES);
        stats->collisions = 0;
-       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+                           sw_rx_dropped;
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
        stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
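A small model of the statistics fold above, with hypothetical names: the per-ring software drop counters accumulated in the receive path are summed and added to the hardware RDROP counter when reporting rx_dropped.

#include <stdio.h>

struct rx_ring { unsigned long packets, bytes, dropped; };

static unsigned long total_rx_dropped(const struct rx_ring *rings, int n,
				      unsigned long hw_rdrop)
{
	unsigned long sw = 0;
	int i;

	for (i = 0; i < n; i++)
		sw += rings[i].dropped;	/* drops counted in the rx path */
	return hw_rdrop + sw;		/* hardware drops + software drops */
}

int main(void)
{
	struct rx_ring rings[2] = { { 10, 1000, 3 }, { 20, 2000, 1 } };

	printf("%lu\n", total_rx_dropped(rings, 2, 7));	/* prints 11 */
	return 0;
}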
index 86bcfe510e4e5b930b94de462f5ff9a76d4183c4..b723e3bcab39e83653be5367da514bd876b9d0cd 100644 (file)
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                gfp_t gfp = _gfp;
 
                if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN;
+                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
                page = alloc_pages(gfp, order);
                if (likely(page))
                        break;
@@ -126,7 +126,9 @@ out:
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
                                page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
-                       set_page_count(page, 1);
+                       /* Revert changes done by mlx4_alloc_pages */
+                       page_ref_sub(page, page_alloc[i].page_size /
+                                          priv->frag_info[i].frag_stride - 1);
                        put_page(page);
                }
        }
@@ -176,7 +178,9 @@ out:
                dma_unmap_page(priv->ddev, page_alloc->dma,
                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
-               set_page_count(page, 1);
+               /* Revert changes done by mlx4_alloc_pages */
+               page_ref_sub(page, page_alloc->page_size /
+                                  priv->frag_info[i].frag_stride - 1);
                put_page(page);
                page_alloc->page = NULL;
        }
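A userspace model (a plain integer in place of a struct page) of the revert these two hunks perform: a page shared by page_size / frag_stride fragments had its reference count raised by that many minus one at allocation time, so the error path subtracts the same amount before the final put, leaving the count balanced.

#include <assert.h>
#include <stdio.h>

struct page_model { int refcount; };

static void get_fragments(struct page_model *p, int page_size, int frag_stride)
{
	/* allocation took one reference; add one per additional fragment */
	p->refcount += page_size / frag_stride - 1;
}

static void revert_fragments(struct page_model *p, int page_size, int frag_stride)
{
	p->refcount -= page_size / frag_stride - 1;	/* undo the bump */
	p->refcount--;					/* the final put_page() */
}

int main(void)
{
	struct page_model p = { .refcount = 1 };

	get_fragments(&p, 8192, 2048);	/* 4 fragments share the page */
	revert_fragments(&p, 8192, 2048);
	assert(p.refcount == 0);	/* the page would now be freed */
	printf("refcount=%d\n", p.refcount);
	return 0;
}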
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /* GRO not possible, complete processing here */
                skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
                if (!skb) {
-                       priv->stats.rx_dropped++;
+                       ring->dropped++;
                        goto next;
                }
 
index 358f7230da589fc6e0b8b605255f126ae546d7db..12c77a70abdb451c475680c05bb2b7cf86436cdf 100644 (file)
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
        return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+       int err = 0;
+
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+               err = pci_enable_device(pdev);
+               if (!err)
+                       dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+
+       return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+               pci_disable_device(pdev);
+               dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                         int total_vfs, int *nvfs, struct mlx4_priv *priv,
                         int reset_flow)
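A userspace model of the enable/disable helpers added above, assuming only what the hunk shows and using hypothetical names: a mutex plus an explicit enabled/disabled state makes both calls idempotent, so the AER error path and the ordinary teardown path cannot enable or disable the PCI device twice.

#include <pthread.h>
#include <stdio.h>

enum pci_status { STATUS_DISABLED, STATUS_ENABLED };

struct dev_model {
	pthread_mutex_t lock;
	enum pci_status status;
	int enable_calls, disable_calls;	/* stand-ins for the PCI core calls */
};

static int model_enable(struct dev_model *d)
{
	int err = 0;

	pthread_mutex_lock(&d->lock);
	if (d->status == STATUS_DISABLED) {
		d->enable_calls++;		/* pci_enable_device() would run here */
		d->status = STATUS_ENABLED;
	}
	pthread_mutex_unlock(&d->lock);
	return err;
}

static void model_disable(struct dev_model *d)
{
	pthread_mutex_lock(&d->lock);
	if (d->status == STATUS_ENABLED) {
		d->disable_calls++;		/* pci_disable_device() would run here */
		d->status = STATUS_DISABLED;
	}
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev_model d = { .lock = PTHREAD_MUTEX_INITIALIZER,
			       .status = STATUS_DISABLED };

	model_enable(&d);
	model_disable(&d);
	model_disable(&d);	/* second disable is a harmless no-op */
	printf("enables=%d disables=%d\n", d.enable_calls, d.disable_calls);
	return 0;
}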
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-       err = pci_enable_device(pdev);
+       err = mlx4_pci_enable_device(&priv->dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
        pci_release_regions(pdev);
 
 err_disable_pdev:
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(&priv->dev);
        pci_set_drvdata(pdev, NULL);
        return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        priv->pci_dev_data = id->driver_data;
        mutex_init(&dev->persist->device_state_mutex);
        mutex_init(&dev->persist->interface_state_mutex);
+       mutex_init(&dev->persist->pci_status_mutex);
 
        ret = devlink_register(devlink, &pdev->dev);
        if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        }
 
        pci_release_regions(pdev);
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(dev);
        devlink_unregister(devlink);
        kfree(dev->persist);
        devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(persist->dev);
        return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev  *dev  = persist->dev;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int               ret;
-       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-       int total_vfs;
+       int err;
 
        mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+       err = mlx4_pci_enable_device(dev);
+       if (err) {
+               mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
+       return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       int total_vfs;
+       int err;
+
+       mlx4_err(dev, "%s was called\n", __func__);
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-               ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+               err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
                                    priv, 1);
-               if (ret) {
-                       mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-                                __func__,  ret);
+               if (err) {
+                       mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+                                __func__,  err);
                        goto end;
                }
 
-               ret = restore_current_port_types(dev, dev->persist->
+               err = restore_current_port_types(dev, dev->persist->
                                                 curr_port_type, dev->persist->
                                                 curr_port_poss_type);
-               if (ret)
-                       mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+               if (err)
+                       mlx4_err(dev, "could not restore original port types (%d)\n", err);
        }
 end:
        mutex_unlock(&persist->interface_state_mutex);
 
-       return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
+       .resume         = mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
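A minimal sketch, assuming nothing beyond what the hunks above show and using hypothetical names, of how the recovery work is now split between the two PCI error callbacks: slot_reset only re-enables the device and reports success, and the reload that used to happen there moves into the new resume callback, which the PCI core invokes afterwards.

#include <stdio.h>

enum ers_result { ERS_RECOVERED, ERS_DISCONNECT };

struct recovering_dev { int enabled; int loaded; };

static enum ers_result model_slot_reset(struct recovering_dev *d)
{
	d->enabled = 1;			/* the mlx4_pci_enable_device() step */
	/* restore config space, set bus mastering, then report success */
	return ERS_RECOVERED;
}

static void model_resume(struct recovering_dev *d)
{
	if (d->enabled && !d->loaded)
		d->loaded = 1;		/* the mlx4_load_one() reload step */
}

int main(void)
{
	struct recovering_dev d = { 0, 0 };

	if (model_slot_reset(&d) == ERS_RECOVERED)
		model_resume(&d);	/* the PCI core calls .resume next */
	printf("enabled=%d loaded=%d\n", d.enabled, d.loaded);
	return 0;
}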
index ef9683101eada7081163b7f6bf1473c18bd63da2..c9d7fc5159f2fb84e62496d810828e65324e7403 100644 (file)
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
        int                     init_port_ref[MLX4_MAX_PORTS + 1];
        u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       u8                      pptx;
+       u8                      pprx;
        int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
        struct mlx4_resource_tracker res_tracker;
        struct workqueue_struct *comm_wq;
index d12ab6a733446c52ba2afb037768bb2dd43f3515..63b1aeae2c037cd585b1473db97134644e188e03 100644 (file)
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
        unsigned long csum_ok;
        unsigned long csum_none;
        unsigned long csum_complete;
+       unsigned long dropped;
        int hwtstamp_rx_filter;
        cpumask_var_t affinity_mask;
 };
index 211c65087997dd5a92cc53a13f9fd3869927d09d..087b23b320cb44ff88dce362f47437de725dfeb5 100644 (file)
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                        }
 
                        gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+                       /* Slave cannot change Global Pause configuration */
+                       if (slave != mlx4_master_func_num(dev) &&
+                           ((gen_context->pptx != master->pptx) ||
+                            (gen_context->pprx != master->pprx))) {
+                               gen_context->pptx = master->pptx;
+                               gen_context->pprx = master->pprx;
+                               mlx4_warn(dev,
+                                         "denying Global Pause change for slave:%d\n",
+                                         slave);
+                       } else {
+                               master->pptx = gen_context->pptx;
+                               master->pprx = gen_context->pprx;
+                       }
                        break;
                case MLX4_SET_PORT_GID_TABLE:
                        /* change to MULTIPLE entries: number of guest's gids
index 518af329502ddff20c8fc322ebed3303b7b0b9a9..7869465435fa81e9c9ebd9e5ae780b01efb20087 100644 (file)
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
        return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
        curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+                                   struct qede_dev *edev, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(edev, rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                                         struct qede_rx_queue *rxq,
                                         struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
        curr_cons->page_offset += rxq->rx_buf_seg_size;
 
        if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                       /* Since we failed to allocate new buffer
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
                        return -ENOMEM;
+               }
 
                dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                           len_on_bd);
 
        if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               /* Incr page ref count to reuse on allocation failure
+                * so that it doesn't get freed while freeing SKB.
+                */
+               atomic_inc(&current_bd->data->_count);
                goto out;
        }
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
        return 0;
 
 out:
+       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
        return -ENOMEM;
 }
 
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
        tpa_info->skb = netdev_alloc_skb(edev->ndev,
                                         le16_to_cpu(cqe->len_on_first_bd));
        if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
                tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-               return;
+               goto cons_buf;
        }
 
        skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
        /* This is needed in order to enable forwarding support */
        qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
        if (likely(cqe->ext_bd_len_list[0]))
                qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
                                   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct iphdr));
        th = tcp_hdr(skb);
 
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        th = tcp_hdr(skb);
 
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
                             struct sk_buff *skb,
                             u16 vlan_tag)
 {
+       /* FW can send a single MTU sized packet from gro flow
+        * due to aggregation timeout/last segment etc. which
+        * is not expected to be a gro packet. If a skb has zero
+        * frags then simply push it in the stack as non gso skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
 #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
+               skb_set_network_header(skb, 0);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
                }
        }
 #endif
+
+send_skb:
        skb_record_rx_queue(skb, fp->rss_id);
        qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
                                  sw_comp_cons, parse_flag);
                        rxq->rx_hw_errors++;
-                       qede_reuse_page(edev, rxq, sw_rx_data);
-                       goto next_rx;
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       goto next_cqe;
                }
 
                skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        DP_NOTICE(edev,
                                  "Build_skb failed, dropping incoming packet\n");
-                       qede_reuse_page(edev, rxq, sw_rx_data);
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
                        rxq->rx_alloc_errors++;
-                       goto next_rx;
+                       goto next_cqe;
                }
 
                /* Copy data into SKB */
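A userspace model of the recycling helper these error paths now call (hypothetical names, an 8-entry array as the BD ring): buffers already taken from the completion are reposted from the consumer side to the producer side so the firmware never starves, and the software consumer index advances by the number of buffers the CQE covered.

#include <stdio.h>

#define NUM_RX_BDS 8	/* power of two so the masks below work */

struct rx_queue {
	int ring[NUM_RX_BDS];		/* stand-in for the sw_rx_ring pages */
	unsigned int cons, prod;
};

static void reuse_buffer(struct rx_queue *q, int buf)
{
	q->ring[q->prod++ & (NUM_RX_BDS - 1)] = buf;	/* repost to firmware */
}

static void recycle_bd_ring(struct rx_queue *q, unsigned int count)
{
	while (count--) {
		int buf = q->ring[q->cons & (NUM_RX_BDS - 1)];

		reuse_buffer(q, buf);	/* qede_reuse_page() equivalent */
		q->cons++;		/* qede_rx_bd_ring_consume() equivalent */
	}
}

int main(void)
{
	struct rx_queue q = { { 1, 2, 3, 4 }, 0, 4 };

	recycle_bd_ring(&q, 2);		/* give the first two buffers back */
	printf("cons=%u prod=%u\n", q.cons, q.prod);	/* cons=2 prod=6 */
	return 0;
}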
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                        if (unlikely(qede_realloc_rx_buffer(edev, rxq,
                                                            sw_rx_data))) {
                                DP_ERR(edev, "Failed to allocate rx buffer\n");
+                               /* Incr page ref count to reuse on allocation
+                                * failure so that it doesn't get freed while
+                                * freeing SKB.
+                                */
+
+                               atomic_inc(&sw_rx_data->data->_count);
                                rxq->rx_alloc_errors++;
+                               qede_recycle_rx_bd_ring(rxq, edev,
+                                                       fp_cqe->bd_num);
+                               dev_kfree_skb_any(skb);
                                goto next_cqe;
                        }
                }
 
+               qede_rx_bd_ring_consume(rxq);
+
                if (fp_cqe->bd_num != 1) {
                        u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
                        u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                             num_frags--) {
                                u16 cur_size = pkt_len > rxq->rx_buf_size ?
                                                rxq->rx_buf_size : pkt_len;
+                               if (unlikely(!cur_size)) {
+                                       DP_ERR(edev,
+                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                                              num_frags);
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
+                                       goto next_cqe;
+                               }
 
-                               WARN_ONCE(!cur_size,
-                                         "Still got %d BDs for mapping jumbo, but length became 0\n",
-                                         num_frags);
-
-                               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
                                        goto next_cqe;
+                               }
 
-                               rxq->sw_rx_cons++;
                                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
                                sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-                               qed_chain_consume(&rxq->rx_bd_ring);
+                               qede_rx_bd_ring_consume(rxq);
+
                                dma_unmap_page(&edev->pdev->dev,
                                               sw_rx_data->mapping,
                                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                pkt_len -= cur_size;
                        }
 
-                       if (pkt_len)
+                       if (unlikely(pkt_len))
                                DP_ERR(edev,
                                       "Mapped all BDs of jumbo, but still have %d bytes\n",
                                       pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                skb_record_rx_queue(skb, fp->rss_id);
 
                qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-               qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-               rxq->sw_rx_cons++;
 next_rx_only:
                rx_pkt++;
 
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-               if (replace_buf) {
+               if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
                                       dma_unmap_addr(replace_buf, mapping),
                                       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
 {
-       int i, rc, size, num_allocated;
+       int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Rx buffers ring allocation failed\n");
+               rc = -ENOMEM;
                goto err;
        }
 
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->num_rx_buffers; i++) {
                rc = qede_alloc_rx_buffer(edev, rxq);
-               if (rc)
-                       break;
-       }
-       num_allocated = i;
-       if (!num_allocated) {
-               DP_ERR(edev, "Rx buffers allocation failed\n");
-               goto err;
-       } else if (num_allocated < rxq->num_rx_buffers) {
-               DP_NOTICE(edev,
-                         "Allocated less buffers than desired (%d allocated)\n",
-                         num_allocated);
+               if (rc) {
+                       DP_ERR(edev,
+                              "Rx buffers allocation failed at index %d\n", i);
+                       goto err;
+               }
        }
 
-       qede_alloc_sge_mem(edev, rxq);
-
-       return 0;
-
+       rc = qede_alloc_sge_mem(edev, rxq);
 err:
-       qede_free_mem_rxq(edev, rxq);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
        }
 
        return 0;
-
 err:
-       qede_free_mem_fp(edev, fp);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
                struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
                rc = qede_alloc_mem_fp(edev, fp);
-               if (rc)
-                       break;
-       }
-
-       if (rss_id != QEDE_RSS_CNT(edev)) {
-               /* Failed allocating memory for all the queues */
-               if (!rss_id) {
+               if (rc) {
                        DP_ERR(edev,
-                              "Failed to allocate memory for the leading queue\n");
-                       rc = -ENOMEM;
-               } else {
-                       DP_NOTICE(edev,
-                                 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-                                 QEDE_RSS_CNT(edev), rss_id);
+                              "Failed to allocate memory for fastpath - rss id = %d\n",
+                              rss_id);
+                       qede_free_mem_load(edev);
+                       return rc;
                }
-               edev->num_rss = rss_id;
        }
 
        return 0;
index 087e14a3fba726cd0637cbd12f2138b7dbe8adbf..9e2a0bd8f5a88803b4d1e260e18ffff9f119d541 100644 (file)
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
        rate = clk_get_rate(clk);
        clk_put(clk);
 
+       if (!rate)
+               return -EINVAL;
+
        inc = 1000000000ULL << 20;
        do_div(inc, rate);
 
index 004e2d7560fd86e289980a2526a8e6e9f4fb77c3..ceea74cc22293f6dc26141fc9a0d33ed718520ce 100644 (file)
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                                   __func__);
                        return ret;
                }
-               ret = sh_eth_dev_init(ndev, false);
+               ret = sh_eth_dev_init(ndev, true);
                if (ret < 0) {
                        netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
                                   __func__);
                        return ret;
                }
 
-               mdp->irq_enabled = true;
-               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-               /* Setting the Rx mode will start the Rx process. */
-               sh_eth_write(ndev, EDRRR_R, EDRRR);
                netif_device_attach(ndev);
        }
 
index f0d797ab74d8f2ea927c6956525b728b9c20ddca..44022b1845ce334c1f2a7530a081f5c31746d324 100644 (file)
@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG                 0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK          0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10            0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        int phymode = dwmac->interface;
        u32 reg_offset = dwmac->reg_offset;
        u32 reg_shift = dwmac->reg_shift;
-       u32 ctrl, val;
+       u32 ctrl, val, module;
 
        switch (phymode) {
        case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
        ctrl |= val << reg_shift;
 
-       if (dwmac->f2h_ptp_ref_clk)
+       if (dwmac->f2h_ptp_ref_clk) {
                ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-       else
+               regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                           &module);
+               module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+               regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                            module);
+       } else {
                ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+       }
 
        regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
        return 0;
 }
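A sketch of the read-modify-write added above, with a fake register file standing in for the syscon regmap (the offsets and names are kept only as illustrative assumptions): when the PTP reference clock comes from the FPGA fabric, the corresponding bit in the FPGA module register is set without disturbing its other bits.

#include <stdint.h>
#include <stdio.h>

#define FPGAGRP_MODULE_REG	0x28
#define FPGAGRP_MODULE_EMAC	0x04

static uint32_t regs[0x40];	/* fake system-manager register file */

static void reg_set_bits(uint32_t offset, uint32_t bits)
{
	uint32_t val = regs[offset];	/* regmap_read() equivalent */

	val |= bits;			/* only set, never clear, other bits */
	regs[offset] = val;		/* regmap_write() equivalent */
}

int main(void)
{
	regs[FPGAGRP_MODULE_REG] = 0x10;	/* unrelated bits stay put */
	reg_set_bits(FPGAGRP_MODULE_REG, FPGAGRP_MODULE_EMAC);
	printf("module reg = 0x%02x\n",
	       (unsigned int)regs[FPGAGRP_MODULE_REG]);	/* 0x14 */
	return 0;
}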
 
index 42fdfd4d9d4f9873c8b26f1518aa56b3c5e719b0..bbb77cd8ad6776df47d01549cceb7165735c15f0 100644 (file)
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
        int i, ret;
        u32 reg;
 
+       pm_runtime_get_sync(&priv->pdev->dev);
+
        if (!cpsw_common_res_usage_state(priv))
                cpsw_intr_disable(priv);
        netif_carrier_off(ndev);
 
-       pm_runtime_get_sync(&priv->pdev->dev);
-
        reg = priv->version;
 
        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
index 5d9abedd6b757ca65000e5dea8155acf0eac9412..58d58f002559f627040a8bfb3fa452f671c62e44 100644 (file)
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
                pdata->hw_ram_addr = auxdata->hw_ram_addr;
        }
 
-       pdev->dev.platform_data = pdata;
-
        return  pdata;
 }
 
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
        cpdma_ctlr_destroy(priv->dma);
 
        unregister_netdev(ndev);
+       pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
 
        return 0;
index b5d50d45872893ffcbdef86f8317d0512b2e2a32..93ffedfa299412f78af2c72fedc991101a52a451 100644 (file)
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
                return -ENOMEM;
 
        mutex_init(&ks->lock);
-       ks->spi = spi_dev_get(spi);
+       ks->spi = spi;
        ks->chip = &ks8995_chip[variant];
 
        if (ks->spi->dev.of_node) {
index bdd83d95ec0aa677b8443476ef2ee66018aff701..96a5028621c8b320c2d6feca5911f40cde10771e 100644 (file)
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info,
        },
-       /* Huawei E3372 fails unless NDP comes after the IP packets */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+       /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+        * (12d1:157d), are known to fail unless the NDP is placed
+        * after the IP packets.  Applying the quirk to all Huawei
+        * devices is broader than necessary, but harmless.
+        */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
        },
        /* default entry */
index b2348f67b00a7044189f6650000eea470a2ae386..db8022ae415bd234c19c8348dc6f90697d6ad840 100644 (file)
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                union Vmxnet3_GenericDesc *gdesc)
 {
        if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-               /* typical case: TCP/UDP over IP and both csums are correct */
-               if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-                                                       VMXNET3_RCD_CSUM_OK) {
+               if (gdesc->rcd.v4 &&
+                   (le32_to_cpu(gdesc->dword[3]) &
+                    VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+                       BUG_ON(gdesc->rcd.frg);
+               } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+                                            (1 << VMXNET3_RCD_TUC_SHIFT))) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-                       BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
index 729c344e677499b6f737aa622d6925c07d647a17..c4825392d64b6c8de17822532d113689f88d8bb7 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index 9a9fabb900c19e5f7dc483a63f1b71fcd1a96e5a..8a8f1e58b4155808e2b28abb91d85ea772c7fd63 100644 (file)
@@ -60,41 +60,6 @@ struct pcpu_dstats {
        struct u64_stats_sync   syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-       return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-       /* TO-DO: return max ethernet size? */
-       return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-       /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-       return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-       .family         = AF_INET,
-       .local_out      = vrf_ip_local_out,
-       .check          = vrf_ip_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
  * to flip skb->dev for those ndisc packets. This really fails
  * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-       .family         = AF_INET6,
-       .local_out      = ip6_local_out,
-       .check          = vrf_ip6_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-       vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-                                                    sizeof(struct rt6_info),
-                                                    0,
-                                                    SLAB_HWCACHE_ALIGN,
-                                                    NULL);
-
-       if (!vrf_dst_ops6.kmem_cachep)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-       kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-       skb->dev->stats.rx_errors++;
-       kfree_skb(skb);
-       return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-       dst_destroy(&vrf->rt6->dst);
-       free_percpu(vrf->rt6->rt6i_pcpu);
+       dst_release(&vrf->rt6->dst);
        vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
        struct net_vrf *vrf = netdev_priv(dev);
-       struct dst_entry *dst;
+       struct net *net = dev_net(dev);
        struct rt6_info *rt6;
-       int cpu;
        int rc = -ENOMEM;
 
-       rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       rt6 = ip6_dst_alloc(net, dev,
+                           DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
        if (!rt6)
                goto out;
 
-       dst = &rt6->dst;
-
-       rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-       if (!rt6->rt6i_pcpu) {
-               dst_destroy(dst);
-               goto out;
-       }
-       for_each_possible_cpu(cpu) {
-               struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-               *p =  NULL;
-       }
-
-       memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-       INIT_LIST_HEAD(&rt6->rt6i_siblings);
-       INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-       rt6->dst.input  = vrf_input6;
        rt6->dst.output = vrf_output6;
-
-       rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-       atomic_set(&rt6->dst.__refcnt, 2);
-
+       rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+       dst_hold(&rt6->dst);
        vrf->rt6 = rt6;
        rc = 0;
 out:
        return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-       dst_destroy(dst);
+       dst_release(dst);
        vrf->rth = NULL;
 }
 
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth;
 
-       rth = dst_alloc(&vrf_dst_ops, dev, 2,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
        if (rth) {
                rth->dst.output = vrf_output;
-               rth->rt_genid   = rt_genid_ipv4(dev_net(dev));
-               rth->rt_flags   = 0;
-               rth->rt_type    = RTN_UNICAST;
-               rth->rt_is_input = 0;
-               rth->rt_iif     = 0;
-               rth->rt_pmtu    = 0;
-               rth->rt_gateway = 0;
-               rth->rt_uses_gateway = 0;
                rth->rt_table_id = vrf->tb_id;
-               INIT_LIST_HEAD(&rth->rt_uncached);
-               rth->rt_uncached_list = NULL;
        }
 
        return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
        struct net_device *port_dev;
        struct list_head *iter;
 
-       vrf_rtable_destroy(vrf);
-       vrf_rt6_destroy(vrf);
+       vrf_rtable_release(vrf);
+       vrf_rt6_release(vrf);
 
        netdev_for_each_lower_dev(dev, port_dev, iter)
                vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
        return 0;
 
 out_rth:
-       vrf_rtable_destroy(vrf);
+       vrf_rtable_release(vrf);
 out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rth = vrf->rth;
-               atomic_inc(&rth->dst.__refcnt);
+               dst_hold(&rth->dst);
        }
 
        return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rt = vrf->rt6;
-               atomic_inc(&rt->dst.__refcnt);
+               dst_hold(&rt->dst);
        }
 
        return (struct dst_entry *)rt;
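A compact reference-counting model of the change above (hypothetical names, simplified counts): the VRF device holds a reference on its cached dst, lookups take their own via dst_hold(), and device teardown now drops only the device's reference with dst_release() instead of destroying the dst, so it is freed once the last user puts it rather than while still in use.

#include <assert.h>
#include <stdio.h>

struct dst_model { int refcnt; int freed; };

static void hold(struct dst_model *d)
{
	d->refcnt++;
}

static void release(struct dst_model *d)
{
	if (--d->refcnt == 0)
		d->freed = 1;		/* freeing is left to the core */
}

int main(void)
{
	struct dst_model d = { .refcnt = 1 };	/* device ref taken at create */

	hold(&d);		/* a lookup grabbed the cached dst */
	release(&d);		/* device teardown: release, not destroy */
	assert(!d.freed);	/* the in-flight user still owns a reference */
	release(&d);		/* the user's final put frees it */
	printf("freed=%d\n", d.freed);		/* 1 */
	return 0;
}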
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
        int rc;
 
-       vrf_dst_ops.kmem_cachep =
-               kmem_cache_create("vrf_ip_dst_cache",
-                                 sizeof(struct rtable), 0,
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
-
-       if (!vrf_dst_ops.kmem_cachep)
-               return -ENOMEM;
-
-       rc = init_dst_ops6_kmem_cachep();
-       if (rc != 0)
-               goto error2;
-
        register_netdevice_notifier(&vrf_notifier_block);
 
        rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
 
 error:
        unregister_netdevice_notifier(&vrf_notifier_block);
-       free_dst_ops6_kmem_cachep();
-error2:
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-       rtnl_link_unregister(&vrf_link_ops);
-       unregister_netdevice_notifier(&vrf_notifier_block);
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-       free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
index 72380af9dc523e56055eecccd5402de62972412e..b0603e796ad8d0d766e79c7deaf5de4c24c06ec1 100644 (file)
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
        INIT_WORK(&wl->firmware_load, b43_request_firmware);
        schedule_work(&wl->firmware_load);
 
-bcma_out:
        return err;
 
 bcma_err_wireless_exit:
        ieee80211_free_hw(wl->hw);
+bcma_out:
+       kfree(dev);
        return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
        b43_rng_exit(wl);
 
        b43_leds_unregister(wl);
-
        ieee80211_free_hw(wl->hw);
+       kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
        b43_leds_unregister(wl);
        b43_wireless_exit(dev, wl);
+       kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
index 76e649c680a16bb5930f7c6d137aa429b779360d..a50f4df7eae78d1e3682f7b1429c9b3bc42b05fb 100644 (file)
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /* the fw is stopped, the aux sta is dead: clean up driver state */
        iwl_mvm_del_aux_sta(mvm);
 
+       iwl_free_fw_paging(mvm);
+
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
index 5e8ab796d5bc06b86fcbdab0f48edb2698074579..d278399097dc5bedb72ae8bed05da5db263cd6f2 100644 (file)
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
-       iwl_free_fw_paging(mvm);
-
        iwl_mvm_tof_clean(mvm);
 
        ieee80211_free_hw(mvm->hw);
index eb39c7e09781e9dd04a159f04449a86bc93cc1b5..b2b79354d5c02be2b172b508a80af29ffec5a880 100644 (file)
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
         */
        val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
        if (val & (BIT(1) | BIT(17))) {
-               IWL_INFO(trans,
-                        "can't access the RSA semaphore it is write protected\n");
+               IWL_DEBUG_INFO(trans,
+                              "can't access the RSA semaphore it is write protected\n");
                return 0;
        }
 
index 95dcbff4673b1490c72fec5ee9bc55bcfa5f7c20..6a8245c4ea48c4c6cc463647f1229726bea6a5d9 100644 (file)
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
                for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
                        rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-                                rtldm->thermalvalue, thermal_value);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+                        rtldm->thermalvalue, thermal_value);
                /*Record last Power Tracking Thermal Value*/
                rtldm->thermalvalue = thermal_value;
        }
index 8e09c544d892eadfff181bc67c6f9f135adefcaa..f798899338eddb6955c06f5b44c26c405dffdade 100644 (file)
@@ -103,6 +103,20 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        flush_dcache_page(page);
                }
        } else {
+               /*
+                * Note that we write the data both before and after
+                * clearing poison.  The write before clear poison
+                * handles situations where the latest written data is
+                * preserved and the clear poison operation simply marks
+                * the address range as valid without changing the data.
+                * In this case application software can assume that an
+                * interrupted write will either return the new good
+                * data or an error.
+                *
+                * However, if pmem_clear_poison() leaves the data in an
+                * indeterminate state we need to perform the write
+                * after clear poison.
+                */
                flush_dcache_page(page);
                memcpy_to_pmem(pmem_addr, mem + off, len);
                if (unlikely(bad_pmem)) {
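A userspace sketch of the write ordering the new comment describes (hypothetical names, a byte array standing in for the pmem media): the data is written before the poison-clearing step and, if the range was flagged bad, written again afterwards, so an interrupted write reads back as either the new data or an error.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static char media[64];
static bool bad_range;		/* model of the badblocks lookup */

static void clear_poison(void)
{
	bad_range = false;	/* may leave the data indeterminate */
}

static void model_pmem_write(const char *src, size_t len)
{
	memcpy(media, src, len);	/* write before clearing poison */
	if (bad_range) {
		clear_poison();
		memcpy(media, src, len);/* write again after clearing */
	}
}

int main(void)
{
	bad_range = true;
	model_pmem_write("new data", 8);
	printf("%.*s bad=%d\n", 8, media, bad_range);	/* new data bad=0 */
	return 0;
}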
index 24ccda303efb21daa2bf982021655db286ffff4b..4fd733ff72b1cb7cf38023701d640a059b4e4670 100644 (file)
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (result > 0) {
                dev_err(dev->ctrl.device,
                        "Could not set queue count (%d)\n", result);
-               nr_io_queues = 0;
-               result = 0;
+               return 0;
        }
 
        if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         * If we enable msix early due to not intx, disable it again before
         * setting up the full range we need.
         */
-       if (!pdev->irq)
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       else if (pdev->msix_enabled)
                pci_disable_msix(pdev);
 
        for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        if (pci_enable_device_mem(pdev))
                return result;
 
-       dev->entry[0].vector = pdev->irq;
        pci_set_master(pdev);
 
        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        }
 
        /*
-        * Some devices don't advertse INTx interrupts, pre-enable a single
-        * MSIX vec for setup. We'll adjust this later.
+        * Some devices and/or platforms don't advertise or work with INTx
+        * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+        * adjust this later.
         */
-       if (!pdev->irq) {
-               result = pci_enable_msix(pdev, dev->entry, 1);
-               if (result < 0)
-                       goto disable;
+       if (pci_enable_msix(pdev, dev->entry, 1)) {
+               pci_enable_msi(pdev);
+               dev->entry[0].vector = pdev->irq;
+       }
+
+       if (!dev->entry[0].vector) {
+               result = -ENODEV;
+               goto disable;
        }
 
        cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
+       if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+               goto out;
+
        set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
        result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-       del_timer_sync(&dev->watchdog_timer);
-
        set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->async_work);
+       flush_work(&dev->reset_work);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);
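The interrupt-setup change above boils down to: try a single MSI-X vector, fall back to MSI, and give up only if neither produced a usable vector. As a detached sketch (setup_single_vector() is illustrative, not a function in the driver):

        static int setup_single_vector(struct pci_dev *pdev, struct msix_entry *entry)
        {
                if (!pci_enable_msix(pdev, entry, 1))
                        return 0;                       /* MSI-X vector 0 is live */
                if (!pci_enable_msi(pdev)) {
                        entry->vector = pdev->irq;      /* record the MSI vector */
                        return 0;
                }
                return -ENODEV;                         /* no usable vector at all */
        }

Only one vector is needed at this point for controller bring-up; the full vector range is negotiated later in nvme_setup_io_queues(), which is why that path now disables whichever of MSI/MSI-X was pre-enabled first.
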
index 01b9d0a00abcb06186bdb80d44c5aa9e5f05838c..d11cdbb8fba3edab6d0bfc69490c72b0c40394dd 100644 (file)
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
 }
 EXPORT_SYMBOL(pci_write_vpd);
 
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:       pci device struct
+ * @len:       size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+       if (!dev->vpd || !dev->vpd->ops)
+               return -ENODEV;
+       return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
 
 /**
@@ -498,9 +511,23 @@ out:
        return ret ? ret : count;
 }
 
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_vpd *vpd = dev->vpd;
+
+       if (len == 0 || len > PCI_VPD_MAX_SIZE)
+               return -EIO;
+
+       vpd->valid = 1;
+       vpd->len = len;
+
+       return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
+       .set_size = pci_vpd_set_size,
 };
 
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
        return ret;
 }
 
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_dev *tdev = pci_get_slot(dev->bus,
+                                           PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       int ret;
+
+       if (!tdev)
+               return -ENODEV;
+
+       ret = pci_set_vpd_size(tdev, len);
+       pci_dev_put(tdev);
+       return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
+       .set_size = pci_vpd_f0_set_size,
 };
 
 int pci_vpd_init(struct pci_dev *dev)
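pci_set_vpd_size() gives drivers a way to override the VPD length the core derived from the device when that value is known to be wrong. A hypothetical fixup in a driver probe path might look like this (the 256-byte size is an assumed, device-specific value):

        static int example_fixup_vpd(struct pci_dev *pdev)
        {
                int err = pci_set_vpd_size(pdev, 256);  /* assumed device-specific VPD size */

                if (err)
                        dev_warn(&pdev->dev, "failed to set VPD size: %d\n", err);
                return err;
        }

For devices whose VPD lives only on function 0, pci_vpd_f0_set_size() redirects the call to function 0 so all functions stay consistent.
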
index eb5a2755a1646649a7e1aaea263a1cf118a45af4..2f817fa4c661e72274e873c38f460cefe04faa79 100644 (file)
@@ -32,7 +32,7 @@
 #define to_imx6_pcie(x)        container_of(x, struct imx6_pcie, pp)
 
 struct imx6_pcie {
-       struct gpio_desc        *reset_gpio;
+       int                     reset_gpio;
        struct clk              *pcie_bus;
        struct clk              *pcie_phy;
        struct clk              *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
        usleep_range(200, 500);
 
        /* Some boards don't have PCIe reset GPIO. */
-       if (imx6_pcie->reset_gpio) {
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
                msleep(100);
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
        }
        return 0;
 
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        struct device_node *node = pdev->dev.of_node;
        int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
                return PTR_ERR(pp->dbi_base);
 
        /* Fetch GPIOs */
-       imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-                                                       GPIOD_OUT_LOW);
+       imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+                                           GPIOF_OUT_INIT_LOW, "PCIe reset");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get reset gpio\n");
+                       return ret;
+               }
+       }
 
        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
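This hunk moves the reset GPIO handling back from the gpiod descriptor API to the legacy integer GPIO API. Stripped of the driver specifics, the legacy pattern it relies on is (a sketch, with np and dev standing for the usual device-tree node and struct device):

        int ret, gpio = of_get_named_gpio(np, "reset-gpio", 0);

        if (gpio_is_valid(gpio)) {
                ret = devm_gpio_request_one(dev, gpio,
                                            GPIOF_OUT_INIT_LOW, "PCIe reset");
                if (ret)
                        return ret;
                gpio_set_value_cansleep(gpio, 0);       /* assert reset */
                msleep(100);
                gpio_set_value_cansleep(gpio, 1);       /* release reset */
        }

gpio_is_valid() doubles as the "this board has no reset GPIO" check, which is why a missing property is not treated as an error.
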
index e982010f0ed19e0d0379da4a1c02334a1db9f175..342b6918bbde8534b0f89bade814624316a54e9d 100644 (file)
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
        u8 *data = (u8 *) buf;
 
        /* Several chips lock up trying to read undefined config space */
-       if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+       if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
                size = dev->cfg_size;
        else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
                size = 128;
index d0fb93481573ad3282e917e09d4852c723ecbd15..a814bbb80fcb3d1ddbf35508b7dcf5808f3d0e5d 100644 (file)
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
 struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+       int (*set_size)(struct pci_dev *dev, size_t len);
 };
 
 struct pci_vpd {
index 32346b5a8a119789c1857c90ec13902616ace318..f70090897fdf19c9777c332401dd01de7b1efc52 100644 (file)
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
-                        /* Restore and enable the counter */
-                       armpmu_start(event, PERF_EF_RELOAD);
+                        /*
+                         * Restore and enable the counter.
+                         * armpmu_start() indirectly calls
+                         *
+                         * perf_event_update_userpage()
+                         *
+                         * which requires RCU read locking to be functional;
+                         * wrap the call within RCU_NONIDLE to make the
+                         * RCU subsystem aware that this cpu is not idle from
+                         * an RCU perspective for the duration of the
+                         * armpmu_start() call.
+                         */
+                       RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
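CPU PM notifiers run on the idle entry/exit path, where RCU already considers the CPU idle, so anything that ends up in an RCU read-side section (here perf_event_update_userpage() via armpmu_start()) has to be wrapped in RCU_NONIDLE(). The general shape, as a sketch with a hypothetical restart_counters() helper:

        static int example_cpu_pm_notify(struct notifier_block *nb,
                                         unsigned long cmd, void *v)
        {
                switch (cmd) {
                case CPU_PM_EXIT:
                        /* tell RCU this CPU is momentarily non-idle */
                        RCU_NONIDLE(restart_counters());
                        break;
                }
                return NOTIFY_OK;
        }
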
index 10ce6cba4455c206e8ab23906db520b880d4a95a..09356684c32f6a78f03dbc5ef1dd7addc828bf08 100644 (file)
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
        arg0.integer.value = reg;
 
        status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
        *ret = lret;
-       return (status != AE_OK) ? -EINVAL : 0;
+       return 0;
 }
 
 /**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
 DEFINE_CONV(normal, 1, 2, 3);
 DEFINE_CONV(y_inverted, 1, -2, 3);
 DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
 DEFINE_CONV(z_inverted, 1, 2, -3);
 DEFINE_CONV(xy_swap, 2, 1, 3);
 DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
        AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
        AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+       AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
index f93abc8c1424ad956f62b8dbb5a6dadffd6b4c5b..a818db6aa08f22dfaf8eca3a5b02049a31363cbd 100644 (file)
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+       .freeze  = intel_hid_pl_suspend_handler,
+       .restore  = intel_hid_pl_resume_handler,
        .suspend  = intel_hid_pl_suspend_handler,
        .resume  = intel_hid_pl_resume_handler,
 };
index 3fb1d85c70a89d8d89ab700627691f70a63d03c1..6f497e80c9df220df88563737f33ff701569f31b 100644 (file)
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        ipcdev.acpi_io_size = size;
        dev_info(&pdev->dev, "io res: %pR\n", res);
 
-       /* This is index 0 to cover BIOS data register */
        punit_res = punit_res_array;
+       /* This is index 0 to cover BIOS data register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_DATA_INDEX);
        if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        *punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
 
+       /* This is index 1 to cover BIOS interface register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_IFACE_INDEX);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
                return -ENXIO;
        }
-       /* This is index 1 to cover BIOS interface register */
        *++punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
 
+       /* This is index 2 to cover ISP data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
        }
-       /* This is index 2 to cover ISP data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 
+       /* This is index 3 to cover ISP interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
        }
-       /* This is index 3 to cover ISP interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 
+       /* This is index 4 to cover GTD data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
        }
-       /* This is index 4 to cover GTD data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 
+       /* This is index 5 to cover GTD interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
        }
-       /* This is index 5 to cover GTD interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_IPC_INDEX);
index bd875409a02dd3fbc13f8b61d372604e62f8abf1..a47a41fc10ad77c427158811857da4bc21cd747f 100644 (file)
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
        struct resource *res;
        void __iomem *addr;
 
+       /*
+        * The following resources are required
+        * - BIOS_IPC BASE_DATA
+        * - BIOS_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
                return PTR_ERR(addr);
        punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
 
+       /*
+        * The following resources are optional
+        * - ISPDRIVER_IPC BASE_DATA
+        * - ISPDRIVER_IPC BASE_IFACE
+        * - GTDRIVER_IPC BASE_DATA
+        * - GTDRIVER_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        return 0;
 }
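The change above downgrades the ISP and GT IPC windows from mandatory to optional: platform_get_resource() returning NULL is no longer an error, and the corresponding base pointer simply stays NULL. The guard also matters because devm_ioremap_resource() complains and returns an error pointer when handed a NULL resource. As a reduced sketch (idx and base[] are placeholders for the real index constants and base array):

        res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
        if (res) {
                addr = devm_ioremap_resource(&pdev->dev, res);
                if (!IS_ERR(addr))
                        base[idx] = addr;               /* else leave it NULL */
        }

Code that later uses one of these optional windows is then expected to check the base pointer before touching it.
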
index 397119f83e82384913a305065a70f1ca7ae815d8..781bd10ca7ac5ba147e4e718d1d54577a7626e33 100644 (file)
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
 {
        u32 telem_ctrl = 0;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&(telm_conf->telem_lock));
        if (ioss_period) {
index e305ab541a2227e1f94ff3e80a0da369ed385946..9255ff3ee81ac74e5fc269f256aed955132e730d 100644 (file)
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
                fan_update_desired_level(s);
        mutex_unlock(&fan_mutex);
 
+       if (rc)
+               return rc;
        if (status)
                *status = s;
 
-       return rc;
+       return 0;
 }
 
 static int fan_get_speed(unsigned int *speed)
index 7225ac6b3df5be2faa7382662bbb708286813b8a..fad968eb75f628a8a40abeb62ccc08c61b3d435c 100644 (file)
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
 
        .max_register = FTM_PWMLOAD,
        .volatile_reg = fsl_pwm_volatile_reg,
-       .cache_type = REGCACHE_RBTREE,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int fsl_pwm_probe(struct platform_device *pdev)
index b2156ee5bae1a50f72b8cbf69d68741ef0082671..ecb7dbae9be9daa0ee96ede34a446a78e8e7debd 100644 (file)
@@ -863,7 +863,7 @@ out:
  * A user-initiated temperature conversion is not started by this function,
  * so the temperature is updated once every 64 seconds.
  */
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
 {
        struct ds1307 *ds1307 = dev_get_drvdata(dev);
        u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        int ret;
-       s16 temp;
+       s32 temp;
 
        ret = ds3231_hwmon_read_temp(dev, &temp);
        if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
                return PTR_ERR(ds1307->rtc);
        }
 
-       if (ds1307_can_wakeup_device) {
+       if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
                /* Disable request for an IRQ */
                want_irq = false;
                dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
index 1bce9cf51b1e917632fe97bcbf636293e5442adc..b83908670a9ab0a229029353b669cbd82956eca2 100644 (file)
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
        blk_cleanup_queue(dev_info->dcssblk_queue);
        dev_info->gd->queue = NULL;
        put_disk(dev_info->gd);
-       device_unregister(&dev_info->dev);
 
        /* unload all related segments */
        list_for_each_entry(entry, &dev_info->seg_list, lh)
                segment_unload(entry->segment_name);
 
-       put_device(&dev_info->dev);
        up_write(&dcssblk_devices_sem);
 
+       device_unregister(&dev_info->dev);
+       put_device(&dev_info->dev);
+
        rc = count;
 out_buf:
        kfree(local_buf);
index 75d9896deccb96da19432dcf7b96d339651d625e..e6f54d3b89690613e94d388f1e325adfd32b25ad 100644 (file)
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
                if (req->cmd_type != REQ_TYPE_FS) {
                        blk_start_request(req);
                        blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
-                       blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
 
index e16a49b507efbd57734b0b8f522d0e2a5dfe03ce..0058d9fbf9314b4938fb9009c201ac1bc0a20573 100644 (file)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-       struct inode *ptmx_inode;
+       struct pts_fs_info *fsi;
 
        if (tty->driver->subtype == PTY_TYPE_MASTER)
-               ptmx_inode = tty->driver_data;
+               fsi = tty->driver_data;
        else
-               ptmx_inode = tty->link->driver_data;
-       devpts_kill_index(ptmx_inode, tty->index);
-       devpts_del_ref(ptmx_inode);
+               fsi = tty->link->driver_data;
+       devpts_kill_index(fsi, tty->index);
+       devpts_put_ref(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -720,6 +720,7 @@ static const struct tty_operations pty_unix98_ops = {
 
 static int ptmx_open(struct inode *inode, struct file *filp)
 {
+       struct pts_fs_info *fsi;
        struct tty_struct *tty;
        struct inode *slave_inode;
        int retval;
@@ -734,47 +735,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
+       fsi = devpts_get_ref(inode, filp);
+       retval = -ENODEV;
+       if (!fsi)
+               goto out_free_file;
+
        /* find a device that is not in use. */
        mutex_lock(&devpts_mutex);
-       index = devpts_new_index(inode);
-       if (index < 0) {
-               retval = index;
-               mutex_unlock(&devpts_mutex);
-               goto err_file;
-       }
-
+       index = devpts_new_index(fsi);
        mutex_unlock(&devpts_mutex);
 
-       mutex_lock(&tty_mutex);
-       tty = tty_init_dev(ptm_driver, index);
+       retval = index;
+       if (index < 0)
+               goto out_put_ref;
 
-       if (IS_ERR(tty)) {
-               retval = PTR_ERR(tty);
-               goto out;
-       }
 
+       mutex_lock(&tty_mutex);
+       tty = tty_init_dev(ptm_driver, index);
        /* The tty returned here is locked so we can safely
           drop the mutex */
        mutex_unlock(&tty_mutex);
 
-       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-       tty->driver_data = inode;
+       retval = PTR_ERR(tty);
+       if (IS_ERR(tty))
+               goto out;
 
        /*
-        * In the case where all references to ptmx inode are dropped and we
-        * still have /dev/tty opened pointing to the master/slave pair (ptmx
-        * is closed/released before /dev/tty), we must make sure that the inode
-        * is still valid when we call the final pty_unix98_shutdown, thus we
-        * hold an additional reference to the ptmx inode. For the same /dev/tty
-        * last close case, we also need to make sure the super_block isn't
-        * destroyed (devpts instance unmounted), before /dev/tty is closed and
-        * on its release devpts_kill_index is called.
+        * From here on out, the tty is "live", and the index and
+        * fsi will be killed/put by the tty_release()
         */
-       devpts_add_ref(inode);
+       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+       tty->driver_data = fsi;
 
        tty_add_file(tty, filp);
 
-       slave_inode = devpts_pty_new(inode,
+       slave_inode = devpts_pty_new(fsi,
                        MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
                        tty->link);
        if (IS_ERR(slave_inode)) {
@@ -793,12 +788,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        return 0;
 err_release:
        tty_unlock(tty);
+       // This will also put-ref the fsi
        tty_release(inode, filp);
        return retval;
 out:
-       mutex_unlock(&tty_mutex);
-       devpts_kill_index(inode, index);
-err_file:
+       devpts_kill_index(fsi, index);
+out_put_ref:
+       devpts_put_ref(fsi);
+out_free_file:
        tty_free_file(filp);
        return retval;
 }
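The reworked ptmx_open() above pairs devpts_get_ref()/devpts_new_index() on open with devpts_kill_index()/devpts_put_ref() on the final close. The lifecycle, reduced to a sketch with the tty plumbing elided:

        static int example_ptmx_open(struct inode *inode, struct file *filp)
        {
                struct pts_fs_info *fsi = devpts_get_ref(inode, filp);
                int index;

                if (!fsi)
                        return -ENODEV;
                index = devpts_new_index(fsi);          /* reserve a pty number */
                if (index < 0) {
                        devpts_put_ref(fsi);
                        return index;
                }
                /* ... tty_init_dev(), devpts_pty_new(fsi, ..., index, ...) ... */
                /* the matching devpts_kill_index()/devpts_put_ref() run from
                   pty_unix98_remove() once the last user closes the pair */
                return 0;
        }

Keying everything on the struct pts_fs_info rather than the ptmx inode is what lets the pair survive the ptmx inode going away while /dev/tty still points at it.
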
index 83fd30b0577c55f33eace402617ff3a2a4b09126..a6c4a1b895bd4e5acdab56b4a5a5abf512be6244 100644 (file)
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
        int err;
        unsigned long flags;
 
+       if (!cur) /* nothing to do */
+               return;
+
        acm->putbuffer = NULL;
        err = usb_autopm_get_interface_async(acm->control);
        spin_lock_irqsave(&acm->write_lock, flags);
        if (err < 0) {
                cur->use = 0;
+               acm->putbuffer = cur;
                goto out;
        }
 
index f9d42cf23e55f80104066f6c38b6deda9943f458..7859d738df418f5c3447a8f524d4d8a922299299 100644 (file)
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
                if (companion->bus != pdev->bus ||
                                PCI_SLOT(companion->devfn) != slot)
                        continue;
+
+               /*
+                * Companion device should be either a UHCI, OHCI or EHCI host
+                * controller, otherwise skip.
+                */
+               if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+                               companion->class != CL_EHCI)
+                       continue;
+
                companion_hcd = pci_get_drvdata(companion);
                if (!companion_hcd || !companion_hcd->self.root_hub)
                        continue;
index 80c1de239e9acef58b0e92ce2d4add8e437320dd..bad0d1f9a41d4ff9edbcb186eb86238abf878c74 100644 (file)
@@ -1861,6 +1861,12 @@ no_bw:
        kfree(xhci->rh_bw);
        kfree(xhci->ext_caps);
 
+       xhci->usb2_ports = NULL;
+       xhci->usb3_ports = NULL;
+       xhci->port_array = NULL;
+       xhci->rh_bw = NULL;
+       xhci->ext_caps = NULL;
+
        xhci->page_size = 0;
        xhci->page_shift = 0;
        xhci->bus_state[0].bus_suspended = 0;
index f0640b7a1c42e2b82c4b8c6071fa0c43561ab7e7..48672fac7ff3eb22389acfb4889a513e8ac79948 100644 (file)
@@ -48,6 +48,7 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI             0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI             0x1aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
        struct xhci_hcd *xhci;
 
        xhci = hcd_to_xhci(pci_get_drvdata(dev));
+       xhci->xhc_state |= XHCI_STATE_REMOVING;
        if (xhci->shared_hcd) {
                usb_remove_hcd(xhci->shared_hcd);
                usb_put_hcd(xhci->shared_hcd);
index 5c15e9bc5f7a418e2c56645a2a753e659d20167a..474b5fa149007b8869f56671b6e3fed1050b9edf 100644 (file)
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
        /*
         * As of now platform drivers don't provide MSI support so we ensure
         * here that the generic code does not try to make a pci_dev from our
         * dev struct in order to setup MSI
         */
        xhci->quirks |= XHCI_PLAT;
+
+       /*
+        * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+        * to 1. However, these SoCs don't support 64-bit address memory
+        * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+        * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+        * xhci_gen_setup().
+        */
+       if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+           xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+               xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
 }
 
 /* called during probe() after chip reset completes */
index 5a2e2e3936c4c69e50959e37c04562fbc0439b1f..529c3c40f901df69250cc7d05502bb41177fa329 100644 (file)
@@ -14,7 +14,7 @@
 #include "xhci.h"      /* for hcd_to_xhci() */
 
 enum xhci_plat_type {
-       XHCI_PLAT_TYPE_MARVELL_ARMADA,
+       XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
        XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
        XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
 };
index 7cf66212ceae0cfbe1c161877044755057373781..99b4ff42f7a0148afa63c0da65371c4bee522664 100644 (file)
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;
 
-       if (xhci->xhc_state) {
+       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+               (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
                return -ESHUTDOWN;
        }
index d51ee0c3cf9f009d3f9098bfa77434a6ed4695b6..9e71c96ad74a8de43f78f611f1b8db5490007995 100644 (file)
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
-               xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+               /* clear state flags. Including dying, halted or removing */
+               xhci->xhc_state = 0;
 
        return ret;
 }
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                /* Resume root hubs only when have pending events. */
                status = readl(&xhci->op_regs->status);
                if (status & STS_EINT) {
-                       usb_hcd_resume_root_hub(hcd);
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
+                       usb_hcd_resume_root_hub(hcd);
                }
        }
 
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
-       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
 
        return retval;
 }
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
-       if (xhci->xhc_state & XHCI_STATE_DYING)
+       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+               (xhci->xhc_state & XHCI_STATE_REMOVING))
                return -ENODEV;
 
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
        mutex_lock(&xhci->mutex);
 
-       if (xhci->xhc_state)    /* dying or halted */
+       if (xhci->xhc_state)    /* dying, removing or halted */
                goto out;
 
        if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                return retval;
        xhci_dbg(xhci, "Reset complete\n");
 
+       /*
+        * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+        * of HCCPARAMS1 is set to 1. However, the xHCs don't actually support
+        * 64-bit address memory pointers. So, this driver clears the AC64
+        * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
+        * DMA_BIT_MASK(32)) in this xhci_gen_setup().
+        */
+       if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+               xhci->hcc_params &= ~BIT(0);
+
        /* Set dma_mask and coherent_dma_mask to 64-bits,
         * if xHC supports 64-bit addressing */
        if (HCC_64BIT_ADDR(xhci->hcc_params) &&
index e293e0974f48257e32653e9ae30501d09c64ea1c..6c629c97f8ad09e6e292fc4513206cb9a5860ec1 100644 (file)
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
  */
 #define XHCI_STATE_DYING       (1 << 0)
 #define XHCI_STATE_HALTED      (1 << 1)
+#define XHCI_STATE_REMOVING    (1 << 2)
        /* Statistics */
        int                     error_bitmask;
        unsigned int            quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
 #define XHCI_PME_STUCK_QUIRK   (1 << 20)
 #define XHCI_MTK_HOST          (1 << 21)
 #define XHCI_SSIC_PORT_UNUSED  (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT  (1 << 23)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
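XHCI_NO_64BIT_SUPPORT works by making the existing AC64 check fail: the quirk clears bit 0 of the cached HCCPARAMS1 copy so xhci_gen_setup() falls through to the 32-bit DMA mask. A condensed sketch of that effect (the driver itself sets the streaming and coherent masks through separate calls; dma_set_mask_and_coherent() is used here only to keep the sketch short):

        struct device *dev = hcd->self.controller;

        if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
                xhci->hcc_params &= ~BIT(0);            /* pretend AC64 == 0 */

        if (HCC_64BIT_ADDR(xhci->hcc_params))
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        else
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
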
index 13e4cc31bc79e61a11539c42b8ea182b3dd6b3af..16bc679dc2fc4b807d0b175ba4914102ce964ec2 100644 (file)
@@ -2,7 +2,7 @@
  * USB Attached SCSI
  * Note that this is not the same as the USB Mass Storage driver
  *
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
  * Copyright Matthew Wilcox for Intel Corp, 2010
  * Copyright Sarah Sharp for Intel Corp, 2010
  *
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
        return SUCCESS;
 }
 
+static int uas_target_alloc(struct scsi_target *starget)
+{
+       struct uas_dev_info *devinfo = (struct uas_dev_info *)
+                       dev_to_shost(starget->dev.parent)->hostdata;
+
+       if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+               starget->no_report_luns = 1;
+
+       return 0;
+}
+
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
        struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
-       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
 
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
        .module = THIS_MODULE,
        .name = "uas",
        .queuecommand = uas_queuecommand,
+       .target_alloc = uas_target_alloc,
        .slave_alloc = uas_slave_alloc,
        .slave_configure = uas_slave_configure,
        .eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
        if (result)
                goto set_alt0;
 
+       /*
+        * 1 tag is reserved for untagged commands +
+        * 1 tag to avoid off by one errors in some bridge firmwares
+        */
+       shost->can_queue = devinfo->qdepth - 2;
+
        usb_set_intfdata(intf, shost);
        result = scsi_add_host(shost, &intf->dev);
        if (result)
index ccc113e83d88e2b1e69e207989a5d5b49d526ae2..53341a77d89f275b44f9baf44f6caa25d7675802 100644 (file)
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+               "Seagate",
+               "Expansion Desk",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_LUNS),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
                "Seagate",
index 43576ed31ccdc4bfcfcafba0f95e6006a6bd63b6..9de988a0f856d3d8e220b47e7fe1c8e93cd11c00 100644 (file)
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                        US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
                        US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
                        US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
-                       US_FL_MAX_SECTORS_240);
+                       US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
 
        p = quirks;
        while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                case 'i':
                        f |= US_FL_IGNORE_DEVICE;
                        break;
+               case 'j':
+                       f |= US_FL_NO_REPORT_LUNS;
+                       break;
                case 'l':
                        f |= US_FL_NOT_LOCKABLE;
                        break;
index fe274b5851c77ff5e59b7c88f205cbf09ec61114..93e66a9148b90de9a3308d3b4b49a359414c7363 100644 (file)
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
                fb->off_ienb = CLCD_PL111_IENB;
                fb->off_cntl = CLCD_PL111_CNTL;
        } else {
-#ifdef CONFIG_ARCH_VERSATILE
-               fb->off_ienb = CLCD_PL111_IENB;
-               fb->off_cntl = CLCD_PL111_CNTL;
-#else
-               fb->off_ienb = CLCD_PL110_IENB;
-               fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+               if (of_machine_is_compatible("arm,versatile-ab") ||
+                   of_machine_is_compatible("arm,versatile-pb")) {
+                       fb->off_ienb = CLCD_PL111_IENB;
+                       fb->off_cntl = CLCD_PL111_CNTL;
+               } else {
+                       fb->off_ienb = CLCD_PL110_IENB;
+                       fb->off_cntl = CLCD_PL110_CNTL;
+               }
        }
 
        fb->clk = clk_get(&fb->dev->dev, NULL);
index abfd1f6e33275b6d3b192df0c504669ab771c5e7..1954ec913ce5d188e47bc90f634c3a2909beb40a 100644 (file)
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
                  char *desc, struct gpio_desc **gpiod)
 {
-       struct gpio_desc *gd;
        int r;
 
-       *gpiod = NULL;
-
        r = devm_gpio_request_one(dev, gpio, flags, desc);
-       if (r)
+       if (r) {
+               *gpiod = NULL;
                return r == -ENOENT ? 0 : r;
+       }
 
-       gd = gpio_to_desc(gpio);
-       if (IS_ERR(gd))
-               return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+       *gpiod = gpio_to_desc(gpio);
 
-       *gpiod = gd;
        return 0;
 }
 
index 7f5804537d3065c3baab375a061f30db0c4bd7ea..2fc8c43ce531de02379c93e85c03f95c0d3ca793 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/ratelimit.h>
 #include <linux/bio.h>
 #include <linux/dcache.h>
+#include <linux/namei.h>
 #include <linux/fscrypto.h>
 #include <linux/ecryptfs.h>
 
@@ -81,13 +82,14 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
 /**
  * fscrypt_get_ctx() - Gets an encryption context
  * @inode:       The inode for which we are doing the crypto
+ * @gfp_flags:   The gfp flag for memory allocation
  *
  * Allocates and initializes an encryption context.
  *
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 {
        struct fscrypt_ctx *ctx = NULL;
        struct fscrypt_info *ci = inode->i_crypt_info;
@@ -113,7 +115,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
        if (!ctx) {
-               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
                if (!ctx)
                        return ERR_PTR(-ENOMEM);
                ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -147,7 +149,8 @@ typedef enum {
 
 static int do_page_crypto(struct inode *inode,
                        fscrypt_direction_t rw, pgoff_t index,
-                       struct page *src_page, struct page *dest_page)
+                       struct page *src_page, struct page *dest_page,
+                       gfp_t gfp_flags)
 {
        u8 xts_tweak[FS_XTS_TWEAK_SIZE];
        struct skcipher_request *req = NULL;
@@ -157,7 +160,7 @@ static int do_page_crypto(struct inode *inode,
        struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = skcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, gfp_flags);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
@@ -199,10 +202,9 @@ static int do_page_crypto(struct inode *inode,
        return 0;
 }
 
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
 {
-       ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
-                                                       GFP_NOWAIT);
+       ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
        if (ctx->w.bounce_page == NULL)
                return ERR_PTR(-ENOMEM);
        ctx->flags |= FS_WRITE_PATH_FL;
@@ -213,6 +215,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * fscypt_encrypt_page() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
  * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags:      The gfp flag for memory allocation
  *
  * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
  * encryption context.
@@ -225,7 +228,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * error value or NULL.
  */
 struct page *fscrypt_encrypt_page(struct inode *inode,
-                               struct page *plaintext_page)
+                               struct page *plaintext_page, gfp_t gfp_flags)
 {
        struct fscrypt_ctx *ctx;
        struct page *ciphertext_page = NULL;
@@ -233,18 +236,19 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
 
        BUG_ON(!PageLocked(plaintext_page));
 
-       ctx = fscrypt_get_ctx(inode);
+       ctx = fscrypt_get_ctx(inode, gfp_flags);
        if (IS_ERR(ctx))
                return (struct page *)ctx;
 
        /* The encryption operation will require a bounce page. */
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
        if (IS_ERR(ciphertext_page))
                goto errout;
 
        ctx->w.control_page = plaintext_page;
        err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
-                                       plaintext_page, ciphertext_page);
+                                       plaintext_page, ciphertext_page,
+                                       gfp_flags);
        if (err) {
                ciphertext_page = ERR_PTR(err);
                goto errout;
@@ -275,7 +279,7 @@ int fscrypt_decrypt_page(struct page *page)
        BUG_ON(!PageLocked(page));
 
        return do_page_crypto(page->mapping->host,
-                       FS_DECRYPT, page->index, page, page);
+                       FS_DECRYPT, page->index, page, page, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
@@ -289,11 +293,11 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 
        BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-       ctx = fscrypt_get_ctx(inode);
+       ctx = fscrypt_get_ctx(inode, GFP_NOFS);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ciphertext_page = alloc_bounce_page(ctx);
+       ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
        if (IS_ERR(ciphertext_page)) {
                err = PTR_ERR(ciphertext_page);
                goto errout;
@@ -301,11 +305,12 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 
        while (len--) {
                err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-                                               ZERO_PAGE(0), ciphertext_page);
+                                       ZERO_PAGE(0), ciphertext_page,
+                                       GFP_NOFS);
                if (err)
                        goto errout;
 
-               bio = bio_alloc(GFP_KERNEL, 1);
+               bio = bio_alloc(GFP_NOWAIT, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
@@ -345,13 +350,20 @@ EXPORT_SYMBOL(fscrypt_zeroout_range);
  */
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct inode *dir = d_inode(dentry->d_parent);
-       struct fscrypt_info *ci = dir->i_crypt_info;
+       struct dentry *dir;
+       struct fscrypt_info *ci;
        int dir_has_key, cached_with_key;
 
-       if (!dir->i_sb->s_cop->is_encrypted(dir))
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       dir = dget_parent(dentry);
+       if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+               dput(dir);
                return 0;
+       }
 
+       ci = d_inode(dir)->i_crypt_info;
        if (ci && ci->ci_keyring_key &&
            (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
                                          (1 << KEY_FLAG_REVOKED) |
@@ -363,6 +375,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
        cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
        spin_unlock(&dentry->d_lock);
        dir_has_key = (ci != NULL);
+       dput(dir);
 
        /*
         * If the dentry was cached without the key, and it is a
index bece948b363df5b8d331252cd9468d1bfebead68..8580831ed237f4e81df7eb607bcd1a89c3766f33 100644 (file)
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
        if (unlikely(!inode))
                return failed_creating(dentry);
 
-       inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+       make_empty_dir_inode(inode);
        inode->i_flags |= S_AUTOMOUNT;
        inode->i_private = data;
        dentry->d_fsdata = (void *)f;
index 655f21f991606b5bf2bef600a54b814994dbed98..0af8e7d70d2751afa67fd85867ef107a5c4ee3eb 100644 (file)
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
 struct pts_fs_info {
        struct ida allocated_ptys;
        struct pts_mount_opts mount_opts;
+       struct super_block *sb;
        struct dentry *ptmx_dentry;
 };
 
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
        .show_options   = devpts_show_options,
 };
 
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
 {
        struct pts_fs_info *fsi;
 
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
        ida_init(&fsi->allocated_ptys);
        fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
        fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+       fsi->sb = sb;
 
        return fsi;
 }
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
        s->s_op = &devpts_sops;
        s->s_time_gran = 1;
 
-       s->s_fs_info = new_pts_fs_info();
+       s->s_fs_info = new_pts_fs_info(s);
        if (!s->s_fs_info)
                goto fail;
 
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
  * to the System V naming convention
  */
 
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi;
        int index;
        int ida_ret;
 
-       if (!sb)
+       if (!fsi)
                return -ENODEV;
 
-       fsi = DEVPTS_SB(sb);
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
        return index;
 }
 
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
        mutex_lock(&allocated_ptys_lock);
        ida_remove(&fsi->allocated_ptys, idx);
        pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
 /*
  * pty code needs to hold extra references in case of last /dev/tty close
  */
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
+       struct pts_fs_info *fsi;
+
+       sb = pts_sb_from_inode(ptmx_inode);
+       if (!sb)
+               return NULL;
+       fsi = DEVPTS_SB(sb);
+       if (!fsi)
+               return NULL;
 
        atomic_inc(&sb->s_active);
-       ihold(ptmx_inode);
+       return fsi;
 }
 
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
-       iput(ptmx_inode);
-       deactivate_super(sb);
+       deactivate_super(fsi->sb);
 }
 
 /**
@@ -604,22 +604,21 @@ void devpts_del_ref(struct inode *ptmx_inode)
  *
  * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
  */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
                void *priv)
 {
        struct dentry *dentry;
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
        struct inode *inode;
        struct dentry *root;
-       struct pts_fs_info *fsi;
        struct pts_mount_opts *opts;
        char s[12];
 
-       if (!sb)
+       if (!fsi)
                return ERR_PTR(-ENODEV);
 
+       sb = fsi->sb;
        root = sb->s_root;
-       fsi = DEVPTS_SB(sb);
        opts = &fsi->mount_opts;
 
        inode = new_inode(sb);
index db9ae6e18154d851126d9b9ca4785af012893a2d..6a6c27373b5467d11dcfc056b3c17dec5cccde3b 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <linux/spinlock_types.h>
+#include <linux/namei.h>
 
 #include "ext4_extents.h"
 #include "xattr.h"
@@ -482,6 +483,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
        struct ext4_crypt_info *ci;
        int dir_has_key, cached_with_key;
 
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
        dir = dget_parent(dentry);
        if (!ext4_encrypted_inode(d_inode(dir))) {
                dput(dir);
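Both the generic fscrypt and the ext4 copies of ->d_revalidate() get the same two fixes: refuse RCU-walk (the key checks may block) and take a real reference on the parent instead of dereferencing d_parent directly. Distilled into a sketch:

        static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
        {
                struct dentry *dir;
                int ret = 1;                    /* assume the dentry is still valid */

                if (flags & LOOKUP_RCU)
                        return -ECHILD;         /* force a retry in ref-walk mode */

                dir = dget_parent(dentry);
                /* ... inspect d_inode(dir)->i_crypt_info / encryption state ... */
                dput(dir);
                return ret;
        }
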
index 53fec0872e60bf8b7fdc3b63ee0a9a36627edff6..5dafb9cef12e7116c545a07cbac5abc16cb76715 100644 (file)
@@ -992,7 +992,7 @@ submit_and_realloc:
                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
 
-                               ctx = fscrypt_get_ctx(inode);
+                               ctx = fscrypt_get_ctx(inode, GFP_NOFS);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
 
@@ -1092,14 +1092,24 @@ int do_write_data_page(struct f2fs_io_info *fio)
        }
 
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+               gfp_t gfp_flags = GFP_NOFS;
 
                /* wait for GCed encrypted page writeback */
                f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
                                                        fio->old_blkaddr);
-
-               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
+retry_encrypt:
+               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+                                                               gfp_flags);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
+                       if (err == -ENOMEM) {
+                               /* flush pending ios and wait for a while */
+                               f2fs_flush_merged_bios(F2FS_I_SB(inode));
+                               congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               gfp_flags |= __GFP_NOFAIL;
+                               err = 0;
+                               goto retry_encrypt;
+                       }
                        goto out_writepage;
                }
        }
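The retry loop above is the intended consumer of the new gfp_t parameter: start with GFP_NOFS on the writeback path and, if the bounce page cannot be allocated, flush pending I/O, back off, and retry with __GFP_NOFAIL. Factored out as a sketch (example_encrypt_for_writeback() is illustrative, not an f2fs function):

        static int example_encrypt_for_writeback(struct inode *inode,
                                                 struct page *page,
                                                 struct page **out)
        {
                gfp_t gfp = GFP_NOFS;
                struct page *cpage;
        retry:
                cpage = fscrypt_encrypt_page(inode, page, gfp);
                if (IS_ERR(cpage)) {
                        if (PTR_ERR(cpage) == -ENOMEM) {
                                /* a filesystem would flush its pending bios here */
                                congestion_wait(BLK_RW_ASYNC, HZ / 50);
                                gfp |= __GFP_NOFAIL;
                                goto retry;
                        }
                        return PTR_ERR(cpage);
                }
                *out = cpage;
                return 0;
        }
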
index 443e07705c2a6099f6491eb8701d0654de3029f7..90d1157a09f9b9b3dcc778d189a99d68c5512154 100644 (file)
@@ -441,7 +441,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 static int f2fs_file_open(struct inode *inode, struct file *filp)
 {
        int ret = generic_file_open(inode, filp);
-       struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+       struct dentry *dir;
 
        if (!ret && f2fs_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
@@ -450,9 +450,13 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }
-       if (f2fs_encrypted_inode(dir) &&
-                       !fscrypt_has_permitted_context(dir, inode))
+       dir = dget_parent(file_dentry(filp));
+       if (f2fs_encrypted_inode(d_inode(dir)) &&
+                       !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+               dput(dir);
                return -EPERM;
+       }
+       dput(dir);
        return ret;
 }
 
index e85664b7c7d963522fd7efc938a3d96a27edc651..19f532e7d35e9a501256ab64f76a645bf7d6b2e6 100644 (file)
@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
 
        mutex_init(&p->lock);
        p->op = op;
-#ifdef CONFIG_USER_NS
-       p->user_ns = file->f_cred->user_ns;
-#endif
+
+       // No refcounting: the lifetime of 'p' is constrained
+       // to the lifetime of the file.
+       p->file = file;
 
        /*
         * Wrappers around seq_open(e.g. swaps_open) need to be
index 461a0558bca4d8d16e81b88e0286e3fa6251ab79..cebecff536a3a6aec1b06c69e32d8bb4ec2f4f29 100644 (file)
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
        return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+       return false;
 #else
        return true;
 #endif
index e0ee0b3000b2da107c975137165fc989777d8a58..358a4db72a27d76e4df8cf41802cce72c8477c92 100644 (file)
 
 #include <linux/errno.h>
 
+struct pts_fs_info;
+
 #ifdef CONFIG_UNIX98_PTYS
 
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
 /* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-               void *priv);
+struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *);
 /* get private structure */
 void *devpts_get_priv(struct inode *pts_inode);
 /* unlink */
 void devpts_pty_kill(struct inode *inode);
 
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
-               dev_t device, int index, void *priv)
-{
-       return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
-       return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
-
 #endif
 
 
index cd91f75de49b50885669f33c99553c724d543d7a..6027f6bbb0611b20d454ff2ad3ae7330e01deb36 100644 (file)
@@ -263,9 +263,9 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
 extern struct kmem_cache *fscrypt_info_cachep;
 int fscrypt_initialize(void);
 
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
 extern int fscrypt_decrypt_page(struct page *);
 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
 #endif
 
 /* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+                                                       gfp_t f)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
@@ -310,7 +311,7 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
 }
 
 static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
-                                               struct page *p)
+                                               struct page *p, gfp_t f)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
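
Both reworked prototypes above now take an explicit gfp_t. A minimal sketch of a caller on a writeback-style path; passing GFP_NOFS here is an assumption about the intended use, not something stated in this hunk:

        struct page *ciphertext_page;

        ciphertext_page = fscrypt_encrypt_page(inode, plaintext_page, GFP_NOFS);
        if (IS_ERR(ciphertext_page))
                return PTR_ERR(ciphertext_page);
        /* queue ciphertext_page for I/O; plaintext_page is left untouched */
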
index 8541a913f6a36effd78ef24bb0bc3cd69ce20541..d1f904c8b2cb48ef7df75141134f0b8c2f1d4057 100644 (file)
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
        u8                      n_ports;
 };
 
+enum mlx4_pci_status {
+       MLX4_PCI_STATUS_DISABLED,
+       MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
        struct pci_dev         *pdev;
        struct mlx4_dev        *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
        u8              state;
        struct mutex    interface_state_mutex; /* protect SW state */
        u8      interface_state;
+       struct mutex            pci_status_mutex; /* sync pci state */
+       enum mlx4_pci_status    pci_status;
 };
 
 struct mlx4_dev {
index ffcff53e3b2b05d77db7a6e3a47a33842a3fc5e5..a55e5be0894f44d01145a0f3fc6d9c6b011e5754 100644 (file)
@@ -1250,78 +1250,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                            unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
                               int write, int force, struct page **pages,
                               unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
 
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm.  These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build.  The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages().  If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments.  The behavior will be the same
- * as it is now.  If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions?  Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
-               struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
-       get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__,     \
-               get_user_pages8, x,                     \
-               get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
-               int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
-       get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__,     \
-               get_user_pages_locked8, x,                      \
-               get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...)   \
-       get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__,   \
-               get_user_pages_unlocked7, x,                    \
-               get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
 /* Container for pinned pfns / pages */
 struct frame_vector {
        unsigned int nr_allocated;      /* Number of frames we have space for */
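
The deleted macro block above carried the migration note for the old eight-argument calling convention; with the compatibility wrappers gone, the conversion it describes becomes mandatory. The shape of that conversion, following the removed comment and the prototypes above:

        /* old style: the first two arguments were always current/current->mm */
        get_user_pages(current, current->mm, start, nr_pages,
                       write, force, pages, vmas);

        /* new style: same semantics, those arguments dropped */
        get_user_pages(start, nr_pages, write, force, pages, vmas);

        /* acting on another task's mm now goes through the remote variant */
        get_user_pages_remote(tsk, mm, start, nr_pages,
                              write, force, pages, vmas);
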
index 004b8133417dc9dd54315b1ca5f15a9cc28e9a2a..932ec74909c6fc79530f262c06eb2ff6ad0e9b21 100644 (file)
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
 /* Vital product data routines */
 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
index ac6d872ce067e19fb99bb57e7b0505e20732b7cd..57d146fe44dd84e926199d81a8ca0a74a2d012fc 100644 (file)
@@ -72,6 +72,18 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 }
 #endif
 
+static inline bool arch_has_pmem_api(void)
+{
+       return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+}
+
+static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
+               size_t size)
+{
+       memcpy(dst, (void __force *) src, size);
+       return 0;
+}
+
 /*
  * memcpy_from_pmem - read from persistent memory with error handling
  * @dst: destination buffer
@@ -83,12 +95,10 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
                size_t size)
 {
-       return arch_memcpy_from_pmem(dst, src, size);
-}
-
-static inline bool arch_has_pmem_api(void)
-{
-       return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+       if (arch_has_pmem_api())
+               return arch_memcpy_from_pmem(dst, src, size);
+       else
+               return default_memcpy_from_pmem(dst, src, size);
 }
 
 /**
index 1c33dd7da4a7d860004264e00bd406645df1ed96..4ae95f7e8597b0b43575d04aaf524cf252761e6e 100644 (file)
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
        if (!is_a_nulls(first))
                first->pprev = &n->next;
 }
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+                                       struct hlist_nulls_head *h)
+{
+       struct hlist_nulls_node *i, *last = NULL;
+
+       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+            i = hlist_nulls_next_rcu(i))
+               last = i;
+
+       if (last) {
+               n->next = last->next;
+               n->pprev = &last->next;
+               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+       } else {
+               hlist_nulls_add_head_rcu(n, h);
+       }
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
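
The kernel-doc above requires writers to serialize among themselves while readers may traverse concurrently under rcu_read_lock(). For context, the usual nulls-list reader pattern looks roughly like the sketch below; struct item, bucket, slot, key and use() are placeholders, not identifiers from this patch:

        struct item {
                struct hlist_nulls_node node;
                u32 key;
        };

        struct item *obj;
        struct hlist_nulls_node *pos;

        rcu_read_lock();
begin:
        obj = NULL;
        hlist_nulls_for_each_entry_rcu(obj, pos, &bucket->head, node)
                if (obj->key == key)
                        goto found;
        /*
         * The nulls value in the list terminator names the bucket the walk
         * ended in; if it is not the bucket we started from, an entry may
         * have been moved concurrently and the lookup must be restarted.
         */
        if (get_nulls_value(pos) != slot)
                goto begin;
        obj = NULL;
found:
        if (obj)
                use(obj);
        rcu_read_unlock();
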
index dde00defbaa52bd2ae380c76e7df305389aa5b8f..f3d45dd42695e1f813d67103a118dc03d27cdcbc 100644 (file)
@@ -7,13 +7,10 @@
 #include <linux/mutex.h>
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
 
 struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
 
 struct seq_file {
        char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
        struct mutex lock;
        const struct seq_operations *op;
        int poll_event;
-#ifdef CONFIG_USER_NS
-       struct user_namespace *user_ns;
-#endif
+       const struct file *file;
        void *private;
 };
 
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
 #ifdef CONFIG_USER_NS
-       return seq->user_ns;
+       return seq->file->f_cred->user_ns;
 #else
        extern struct user_namespace init_user_ns;
        return &init_user_ns;
index 7f5f78bd15ad448414fa8c06fb61fa25fb152b5f..245f57dbbb614e818d2f067f1f5c431d7e9f193b 100644 (file)
@@ -79,6 +79,8 @@
                /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
        US_FLAG(MAX_SECTORS_240,        0x08000000)             \
                /* Sets max_sectors to 240 */                   \
+       US_FLAG(NO_REPORT_LUNS, 0x10000000)                     \
+               /* Cannot handle REPORT_LUNS */                 \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index c0a92e2c286d6cc0942591dfc02d9b485ae6810e..74c9693d4941dd0f4a08526968a9783727249fb5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 #ifdef CONFIG_CGROUP_NET_CLASSID
 struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
         * softirqs always disables bh.
         */
        if (in_serving_softirq()) {
+               struct sock *sk = skb_to_full_sk(skb);
+
                /* If there is an sock_cgroup_classid we'll use that. */
-               if (!skb->sk)
+               if (!sk || !sk_fullsock(sk))
                        return 0;
 
-               classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
        }
 
        return classid;
index 295d291269e2c88ed4930041597a28f7c9a7a2f7..54c779416eec654b31d278a3bd77f07b0a3e4ea8 100644 (file)
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr, bool anycast);
 
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+                              int flags);
+
 /*
  *     support functions for ND
  *
index d0aeb97aec5d2b2c40eee41049fa6f64f5e0d503..1be050ada8c5d6bd35cd762d1ac61d9240aaebce 100644 (file)
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
 
 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
index 9b0a523bb4280023e4429fcc5379cb5d2d2d419a..6de665bf175041d0b11e5841fe8012196e9b9c98 100644 (file)
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                            unsigned int flags, u16 type,
+                            bool nopolicy, bool noxfrm, bool will_cache);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
index 6df1ce7a411c548bda4163840a90578b6e1b4cfe..5a404c354f4c787e57a6ab6ca68b3ffc537eb1cc 100644 (file)
@@ -847,6 +847,11 @@ struct sctp_transport {
         */
        ktime_t last_time_heard;
 
+       /* When was the last time that we sent a chunk using this
+        * transport? We use this to check for idle transports
+        */
+       unsigned long last_time_sent;
+
        /* Last time(in jiffies) when cwnd is reduced due to the congestion
         * indication based on ECNE chunk.
         */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
                          struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
index 255d3e03727b737091141c1eb8b2f796afff9d9b..121ffc115c4f722750b5a1cc44e45000d15dd2ef 100644 (file)
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+           sk->sk_family == AF_INET6)
+               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+       else
+               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
index b91370f61be64a9ef2bcff2d8f20701f5ced09b1..6db10228113fc0cd2ce876035110dd43c6f0c794 100644 (file)
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb);
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
index 2767c55a641edd64b7018a0d7693826ae60dbad9..ca64f0f50b4533df080a2daa6b1cba2fdf5f0a12 100644 (file)
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
                                    unsigned int verb);
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val);
 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
                              unsigned int val);
 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
index b71fd0b5cbad7d375f3188114be65431def88907..813ffb2e22c9a5a9d9049aadb2aa467d734b734c 100644 (file)
@@ -96,6 +96,7 @@ header-y += cyclades.h
 header-y += cycx_cfm.h
 header-y += dcbnl.h
 header-y += dccp.h
+header-y += devlink.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm.h
index 2e08f8e9b771f032a17e0305c60fcd6f0403e6f5..618ef77c302ab628cd1a2c517568d4e0a10aa93e 100644 (file)
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
        }
 
        if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+           BPF_SIZE(insn->code) == BPF_DW ||
            (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
                verbose("BPF_LD_ABS uses reserved fields\n");
                return -EINVAL;
index 2324ba5310dba49331a3c10876543937363f31b9..ed9410936a220632995d40006f024357d5abe38a 100644 (file)
@@ -1999,6 +1999,7 @@ static inline int get_first_held_lock(struct task_struct *curr,
        return ++i;
 }
 
+#ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Returns the next chain_key iteration
  */
@@ -2069,6 +2070,7 @@ static void print_collision(struct task_struct *curr,
        printk("\nstack backtrace:\n");
        dump_stack();
 }
+#endif
 
 /*
  * Checks whether the chain and the current held locks are consistent
index 2e78ead309344ce3d00b1c6701c4e452856089be..9b5f04404152c296af3a96132f27cfc80ffa9af9 100644 (file)
@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
 {
        struct resource *root = m->private;
        struct resource *r = v, *p;
+       unsigned long long start, end;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;
 
        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;
+
+       if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+               start = r->start;
+               end = r->end;
+       } else {
+               start = end = 0;
+       }
+
        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
-                       width, (unsigned long long) r->start,
-                       width, (unsigned long long) r->end,
+                       width, start,
+                       width, end,
                        r->name ? r->name : "<BAD>");
        return 0;
 }
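
With this hunk, readers of /proc/iomem (and /proc/ioports, which shares r_show()) that lack CAP_SYS_ADMIN relative to the init user namespace see every range as zeros while the resource names stay visible. A small userspace illustration, purely for context and not part of the patch:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/iomem", "r");
                char line[256];

                if (!f)
                        return 1;
                /* unprivileged output now reads "00000000-00000000 : <name>" */
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);
                fclose(f);
                return 0;
        }
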
index 03dd576e67730fb2870c44512f55c1c0c39b77f3..59fd7c0b119cbc3e0b61137732f76cc3759fb7c7 100644 (file)
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
                        free_slot = i;
                        continue;
                }
-               if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+               if (assoc_array_ptr_is_leaf(ptr) &&
+                   ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+                                       index_key)) {
                        pr_devel("replace in slot %d\n", i);
                        edit->leaf_p = &node->slots[i];
                        edit->dead_leaf = node->slots[i];
index abcecdc2d0f23a8ac9884db5297d7b46dfbcea1b..c79d7ea8a38e47b8292d9f9a23bb0744a0efe7c8 100644 (file)
@@ -11,8 +11,7 @@
 /*
  * Detects 64 bits mode
  */
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
-       || defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
 #define LZ4_ARCH64 1
 #else
 #define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
 typedef struct _U16_S { u16 v; } U16_S;
 typedef struct _U32_S { u32 v; } U32_S;
 typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)            \
-       || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6       \
-       && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 
 #define A16(x) (((U16_S *)(x))->v)
 #define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #define PUT4(s, d) (A32(d) = A32(s))
 #define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)      \
+       (d = s - A16(p))
+
 #define LZ4_WRITE_LITTLEENDIAN_16(p, v)        \
        do {    \
                A16(p) = v; \
@@ -51,10 +52,13 @@ typedef struct _U64_S { u64 v; } U64_S;
 #define PUT8(s, d) \
        put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
 
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)        \
-       do {    \
-               put_unaligned(v, (u16 *)(p)); \
-               p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)      \
+       (d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)                        \
+       do {                                            \
+               put_unaligned_le16(v, (u16 *)(p));      \
+               p += 2;                                 \
        } while (0)
 #endif
 
@@ -140,9 +144,6 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #endif
 
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
-       (d = s - get_unaligned_le16(p))
-
 #define LZ4_WILDCOPY(s, d, e)          \
        do {                            \
                LZ4_COPYPACKET(s, d);   \
index bfbd7096b6edc3c5e87398e93507cb124661b3ab..0c6317b7db38a086191a043b5ec480e4f5264e27 100644 (file)
@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
        wait_queue_head_t *wqh = &congestion_wqh[sync];
-       enum wb_state bit;
+       enum wb_congested_state bit;
 
        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
 
 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-       enum wb_state bit;
+       enum wb_congested_state bit;
 
        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
index fb87aea9edc8a69b5b19d131df30aa14c0a3a3e1..c057784c844456f237adc9065bb4c9a3c230fdc6 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *      if (locked)
  *          up_read(&mm->mmap_sem);
  */
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                           int write, int force, struct page **pages,
                           int *locked)
 {
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
                                       write, force, pages, NULL, locked, true,
                                       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  * or if "force" shall be set to 1 (get_user_pages_fast misses the
  * "force" parameter).
  */
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
                                         write, force, pages, FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /*
  * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * and mm being operated on are the current task's.  We also
  * obviously don't pass FOLL_REMOTE in here.
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                int write, int force, struct page **pages,
                struct vm_area_struct **vmas)
 {
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
                                       write, force, pages, vmas, NULL, false,
                                       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
-       struct mm_struct *mm = current->mm;
        int nr, ret;
 
        start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(current, mm, start,
-                                             nr_pages - nr, write, 0, pages);
+               ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 }
 
 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, unsigned long nr_pages,
-                    int write, int force, struct page **pages,
-                    struct vm_area_struct **vmas)
-{
-       WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
-       return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-                           unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages, int *locked)
-{
-       WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
-       return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-                                 unsigned long start, unsigned long nr_pages,
-                                 int write, int force, struct page **pages)
-{
-       WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
-       WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
-       return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
index 102e257cc6c30a525be8ea9071711f31c379c311..c8bd59a03c71563b73c146d35e00550b8119d4c0 100644 (file)
@@ -15,8 +15,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define __DISABLE_GUP_DEPRECATED
-
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
@@ -161,7 +159,7 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages,
                    struct vm_area_struct **vmas)
 {
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
        return __get_user_pages(current, current->mm, start, nr_pages, flags,
                                pages, vmas, NULL);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                            int write, int force, struct page **pages,
                            int *locked)
 {
-       return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+       return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
                                         write, force, pages, 0);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
        return 0;
 }
 subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, unsigned long nr_pages,
-                    int write, int force, struct page **pages,
-                    struct vm_area_struct **vmas)
-{
-       return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-                           unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages,
-                           int *locked)
-{
-       return get_user_pages_locked6(start, nr_pages, write,
-                                     force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-                             unsigned long start, unsigned long nr_pages,
-                             int write, int force, struct page **pages)
-{
-       return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
index 8570bc7744c25cc92e2301f8686cd21a1f26baab..5a61f35412a0595845da4feb2cbd0bc09a1cdbeb 100644 (file)
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
            left - sizeof(struct ebt_entry_match) < m->match_size)
                return -EINVAL;
 
-       match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               request_module("ebt_%s", m->u.name);
+               match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       }
        if (IS_ERR(match))
                return PTR_ERR(match);
        m->u.match = match;
index d04c2d1c8c87d79e89fa9f2a4de97ea9262a7787..e561f9f07d6daa3a13de919334699cc9b82773a7 100644 (file)
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
                                        skb_vlan_tag_get(skb));
-               if (err)
+               if (err) {
+                       __skb_pull(skb, offset);
                        return err;
+               }
+
                skb->protocol = skb->vlan_proto;
                skb->mac_len += VLAN_HLEN;
-               __skb_pull(skb, offset);
 
                skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+               __skb_pull(skb, offset);
        }
        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
index 607a14f20d88011e6de8540b21a69e6527d49df0..b1dc096d22f8c83e771b1df68d7815661ac51bae 100644 (file)
@@ -1034,10 +1034,13 @@ source_ok:
        if (!fld.daddr) {
                fld.daddr = fld.saddr;
 
-               err = -EADDRNOTAVAIL;
                if (dev_out)
                        dev_put(dev_out);
+               err = -EINVAL;
                dev_out = init_net.loopback_dev;
+               if (!dev_out->dn_ptr)
+                       goto out;
+               err = -EADDRNOTAVAIL;
                dev_hold(dev_out);
                if (!fld.daddr) {
                        fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
                if (dev_out == NULL)
                        goto out;
                dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+               if (!dn_db)
+                       goto e_inval;
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fld.daddr)) {
                        dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
+               if (!dev_out->dn_ptr)
+                       goto e_inval;
                fld.flowidn_oif = dev_out->ifindex;
                if (res.fi)
                        dn_fib_info_put(res.fi);
index dd8c80dc32a2216d3705b75cc761bdb156806dc9..8f8713b4388fbfa9a0d36298603995b02718d21d 100644 (file)
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
                return ret;
        }
 
+       ret = arptable_filter_table_init(&init_net);
+       if (ret) {
+               unregister_pernet_subsys(&arptable_filter_net_ops);
+               kfree(arpfilter_ops);
+       }
+
        return ret;
 }
 
index 02c62299d717b9f6c38a5227e3b3ae376e0015b6..60398a9370e7ec45110fc59bc573d83e35df7351 100644 (file)
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-                                  unsigned int flags, u16 type,
-                                  bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                           unsigned int flags, u16 type,
+                           bool nopolicy, bool noxfrm, bool will_cache)
 {
        struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
        return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                 */
                if (fi && res->prefixlen < 4)
                        fi = NULL;
+       } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+                  (orig_oif != dev_out->ifindex)) {
+               /* For local routes that require a particular output interface
+                * we do not want to cache the result.  Caching the result
+                * causes incorrect behaviour when there are multiple source
+                * addresses on the interface, the end result being that if the
+                * intended recipient is waiting on that interface for the
+                * packet he won't receive it because it will be delivered on
+                * the loopback interface and the IP_PKTINFO ipi_ifindex will
+                * be set to the loopback interface as well.
+                */
+               fi = NULL;
        }
 
        fnhe = NULL;
index e6e65f79ade82132e1a2e5578ba01f43aed15140..c124c3c12f7c5c848d11e2a59d9e17ba93597411 100644 (file)
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
+       tcp_skb_collapse_tstamp(prev, skb);
        tcp_unlink_write_queue(skb, sk);
        sk_wmem_free_skb(sk, skb);
 
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 
        shinfo = skb_shinfo(skb);
        if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-           between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+           !before(shinfo->tskey, prior_snd_una) &&
+           before(shinfo->tskey, tcp_sk(sk)->snd_una))
                __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
index 7d2dc015cd19a64505c968df43c544adabb65e0e..441ae9da3a233fd96385853197fd3938959449e3 100644 (file)
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
        return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb)
+{
+       const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+       u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+       if (unlikely(tsflags)) {
+               struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+               shinfo->tx_flags |= tsflags;
+               shinfo->tskey = next_shinfo->tskey;
+       }
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
        tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+       tcp_skb_collapse_tstamp(skb, next_skb);
+
        sk_wmem_free_skb(sk, next_skb);
 }
 
index 08eed5e16df0d26340bbfabd96ac6dad77895465..a2e7f55a1f6103e7a84d8ca2dd821841fb51f9bc 100644 (file)
@@ -339,8 +339,13 @@ found:
 
                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                spin_lock(&hslot2->lock);
-               hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-                                        &hslot2->head);
+               if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+                       sk->sk_family == AF_INET6)
+                       hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &hslot2->head);
+               else
+                       hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &hslot2->head);
                hslot2->count++;
                spin_unlock(&hslot2->lock);
        }
index 27aed1afcf81c0a516bb6768e20b27a6794fb1cc..23cec53b568ac11802173f161ba63af13adac206 100644 (file)
@@ -3255,6 +3255,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
        int run_pending = 0;
        int err;
@@ -3413,6 +3414,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                if (idev)
                        addrconf_type_change(dev, event);
                break;
+
+       case NETDEV_CHANGEUPPER:
+               info = ptr;
+
+               /* flush all routes if dev is linked to or unlinked from
+                * an L3 master device (e.g., VRF)
+                */
+               if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+                       addrconf_ifdown(dev, 0);
        }
 
        return NOTIFY_OK;
@@ -3438,6 +3448,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
                ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+       return ipv6_addr_type(addr) &
+               (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
        struct net *net = dev_net(dev);
@@ -3495,7 +3511,8 @@ restart:
                                 * address is retained on a down event
                                 */
                                if (!keep_addr ||
-                                   !(ifa->flags & IFA_F_PERMANENT)) {
+                                   !(ifa->flags & IFA_F_PERMANENT) ||
+                                   addr_is_local(&ifa->addr)) {
                                        hlist_del_init_rcu(&ifa->addr_lst);
                                        goto restart;
                                }
@@ -3544,7 +3561,8 @@ restart:
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
 
-               if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+               if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+                   !addr_is_local(&ifa->addr)) {
                        /* set state to skip the notifier below */
                        state = INET6_IFADDR_STATE_DEAD;
                        ifa->state = 0;
index 428162155280ca2af782ea9fd9fa26e0d1666d89..9dd3882fe6bf27f9db783a5bac69a5ddbe841b50 100644 (file)
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
        return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       memset(fl6, 0, sizeof(*fl6));
+       fl6->flowi6_proto = sk->sk_protocol;
+       fl6->daddr = sk->sk_v6_daddr;
+       fl6->saddr = np->saddr;
+       fl6->flowi6_oif = sk->sk_bound_dev_if;
+       fl6->flowi6_mark = sk->sk_mark;
+       fl6->fl6_dport = inet->inet_dport;
+       fl6->fl6_sport = inet->inet_sport;
+       fl6->flowlabel = np->flow_label;
+
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+       if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+               fl6->flowi6_oif = np->mcast_oif;
+
+       security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+       struct ip6_flowlabel *flowlabel = NULL;
+       struct in6_addr *final_p, final;
+       struct ipv6_txoptions *opt;
+       struct dst_entry *dst;
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flowi6 fl6;
+       int err = 0;
+
+       if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+               flowlabel = fl6_sock_lookup(sk, np->flow_label);
+               if (!flowlabel)
+                       return -EINVAL;
+       }
+       ip6_datagram_flow_key_init(&fl6, sk);
+
+       rcu_read_lock();
+       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+       final_p = fl6_update_dst(&fl6, opt, &final);
+       rcu_read_unlock();
+
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto out;
+       }
+
+       if (fix_sk_saddr) {
+               if (ipv6_addr_any(&np->saddr))
+                       np->saddr = fl6.saddr;
+
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       sk->sk_v6_rcv_saddr = fl6.saddr;
+                       inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+                       if (sk->sk_prot->rehash)
+                               sk->sk_prot->rehash(sk);
+               }
+       }
+
+       ip6_dst_store(sk, dst,
+                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                     &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+                     &np->saddr :
+#endif
+                     NULL);
+
+out:
+       fl6_sock_release(flowlabel);
+       return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+       struct dst_entry *dst;
+
+       if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               return;
+
+       rcu_read_lock();
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock        *inet = inet_sk(sk);
        struct ipv6_pinfo       *np = inet6_sk(sk);
-       struct in6_addr *daddr, *final_p, final;
-       struct dst_entry        *dst;
-       struct flowi6           fl6;
-       struct ip6_flowlabel    *flowlabel = NULL;
-       struct ipv6_txoptions   *opt;
+       struct in6_addr         *daddr;
        int                     addr_type;
        int                     err;
+       __be32                  fl6_flowlabel = 0;
 
        if (usin->sin6_family == AF_INET) {
                if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl6, 0, sizeof(fl6));
-       if (np->sndflow) {
-               fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-               if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (!flowlabel)
-                               return -EINVAL;
-               }
-       }
+       if (np->sndflow)
+               fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
        addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
        }
 
        sk->sk_v6_daddr = *daddr;
-       np->flow_label = fl6.flowlabel;
+       np->flow_label = fl6_flowlabel;
 
        inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
         *      destination cache for it.
         */
 
-       fl6.flowi6_proto = sk->sk_protocol;
-       fl6.daddr = sk->sk_v6_daddr;
-       fl6.saddr = np->saddr;
-       fl6.flowi6_oif = sk->sk_bound_dev_if;
-       fl6.flowi6_mark = sk->sk_mark;
-       fl6.fl6_dport = inet->inet_dport;
-       fl6.fl6_sport = inet->inet_sport;
-
-       if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-       if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-               fl6.flowi6_oif = np->mcast_oif;
-
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-       rcu_read_lock();
-       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-       final_p = fl6_update_dst(&fl6, opt, &final);
-       rcu_read_unlock();
-
-       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-       err = 0;
-       if (IS_ERR(dst)) {
-               err = PTR_ERR(dst);
+       err = ip6_datagram_dst_update(sk, true);
+       if (err)
                goto out;
-       }
-
-       /* source address lookup done in ip6_dst_lookup */
-
-       if (ipv6_addr_any(&np->saddr))
-               np->saddr = fl6.saddr;
-
-       if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-               sk->sk_v6_rcv_saddr = fl6.saddr;
-               inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-               if (sk->sk_prot->rehash)
-                       sk->sk_prot->rehash(sk);
-       }
-
-       ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-                     &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-                     &np->saddr :
-#endif
-                     NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
-       fl6_sock_release(flowlabel);
        return err;
 }
 
index ed446639219c3ae8832a19a1f944e4dd6ddc6302..d916d6ab9ad29aeaeb4bfc57276549f3ae839f40 100644 (file)
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
        return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-                                     struct net_device *dev,
-                                     int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+                              struct net_device *dev,
+                              int flags)
 {
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
        return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+       struct dst_entry *dst;
+
        ip6_update_pmtu(skb, sock_net(sk), mtu,
                        sk->sk_bound_dev_if, sk->sk_mark);
+
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+               return;
+
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               ip6_datagram_dst_update(sk, false);
+       bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
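
The sock_owned_by_user() check above defers the route refresh when the socket lock is held by its owner; the refresh is then expected to happen from the protocol's release_cb hook (wired up for UDPv6 later in this diff) once the lock is released. A paraphrased sketch of how the core socket code drives that hook, not part of this patch and with details elided:

        /* net/core/sock.c, release_sock(), roughly: */
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);             /* drain backlogged packets */
        if (sk->sk_prot->release_cb)
                sk->sk_prot->release_cb(sk);    /* e.g. ip6_datagram_release_cb */
        sock_release_ownership(sk);
        spin_unlock_bh(&sk->sk_lock.slock);
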
 
index 8125931106be670b13e186c577141bc3d1fb574b..6bc5c664fa46c048709db40d1a2a7cc5d4df63da 100644 (file)
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
        .sendmsg           = udpv6_sendmsg,
        .recvmsg           = udpv6_recvmsg,
        .backlog_rcv       = __udpv6_queue_rcv_skb,
+       .release_cb        = ip6_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v6_rehash,
index 278f3b9356efdcd37ca3d1aacb62f03e57db7e3c..7cc1d9c22a9fa757d1f752dfcb4c8e928c4c9e11 100644 (file)
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize=*ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize = *ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
index 215fc08c02ab5508374b08ba9dcefc44e8d0875b..330ebd600f254d53e23a174273a504e395c0e7cf 100644 (file)
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_write_queue);
 
-       if (nlk->portid) {
+       if (nlk->portid && nlk->bound) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
index e9dd47b2a85b9e7b65795335b5722e89ac5c3ed8..879185fe183fd0ffa2bf037725faf0a8eb5f166a 100644 (file)
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-                       set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);
 
-                       set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
index 1b9d286756be7ccf3a9b6f9eaad6fe07c385f8fb..b5fea1101faaa52162d438a8cb5e91d3a83ec8bd 100644 (file)
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+               skb_orphan(skb);
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err)
index f12c17f355d932d9b6a396654a66fceb5aaf0c73..18d0becbc46d0dd069dde0bcce47c8b4ee2186f9 100644 (file)
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
        i->ifindex = mreq->mr_ifindex;
        i->alen = mreq->mr_alen;
        memcpy(i->addr, mreq->mr_address, i->alen);
+       memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
        i->count = 1;
        i->next = po->mclist;
        po->mclist = i;
index e6144b8246fd27fe49bffd228b44a44c3e7cbd81..6641bcf7c18505f1d653a32c9c5dc0455926dfd8 100644 (file)
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __set_bit_le(off, (void *)map->m_page_addrs[i]);
+       set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+       clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
index 8764970f0c24179b8470a27c1ab0d45567a0d90f..310cabce23111cfaa45af97adef28f4d41d1bbda 100644 (file)
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-               dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+               dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
                /* Advertise flow control */
                if (ic->i_flowctl) {
index f18c3502420730e87254a6b01f0a6c1e4fb96941..80742edea96fe8a237573c69d3a6349342e6db00 100644 (file)
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);
 
-       if (skb) {
+       if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
                HARD_TX_UNLOCK(dev, txq);
+       } else {
+               spin_lock(root_lock);
+               return qdisc_qlen(q);
        }
        spin_lock(root_lock);
 
index 8d3d3625130ee0fd294998554a9290d57eae56e7..084718f9b3dad09e21e41e34b989e25627058c98 100644 (file)
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                                 * sender MUST assure that at least one T3-rtx
                                 * timer is running.
                                 */
-                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-                                       sctp_transport_reset_timers(transport);
+                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+                                       sctp_transport_reset_t3_rtx(transport);
+                                       transport->last_time_sent = jiffies;
+                               }
                        }
                        break;
 
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        error = sctp_outq_flush_rtx(q, packet,
                                                    rtx_timeout, &start_timer);
 
-                       if (start_timer)
-                               sctp_transport_reset_timers(transport);
+                       if (start_timer) {
+                               sctp_transport_reset_t3_rtx(transport);
+                               transport->last_time_sent = jiffies;
+                       }
 
                        /* This can happen on COOKIE-ECHO resend.  Only
                         * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        list_add_tail(&chunk->transmitted_list,
                                      &transport->transmitted);
 
-                       sctp_transport_reset_timers(transport);
+                       sctp_transport_reset_t3_rtx(transport);
+                       transport->last_time_sent = jiffies;
 
                        /* Only let one DATA chunk get bundled with a
                         * COOKIE-ECHO chunk.
index 7f0bf798205ba3c5311bf57ace66aaaa57cb0367..56f364d8f93270f31867333585fb28317f9d87ad 100644 (file)
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
                        return SCTP_ERROR_RSRC_LOW;
 
                /* Start the heartbeat timer. */
-               if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-                       sctp_transport_hold(peer);
+               sctp_transport_reset_hb_timer(peer);
                asoc->new_transport = peer;
                break;
        case SCTP_PARAM_DEL_IP:
index 7fe56d0acabf66cfd8fe29dfdb45f7620b470ac7..41b081a64752da3e4de5dafe6d3afac84bf4923d 100644 (file)
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                             sctp_cmd_seq_t *commands,
                             gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        struct sctp_association *asoc = transport->asoc;
        struct sock *sk = asoc->base.sk;
        struct net *net = sock_net(sk);
+       u32 elapsed, timeout;
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
                goto out_unlock;
        }
 
+       /* Check if we should still send the heartbeat or reschedule */
+       elapsed = jiffies - transport->last_time_sent;
+       timeout = sctp_transport_timeout(transport);
+       if (elapsed < timeout) {
+               elapsed = timeout - elapsed;
+               if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+                       sctp_transport_hold(transport);
+               goto out_unlock;
+       }
+
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
                                             0);
 
                /* Update the hb timer to resend a heartbeat every rto */
-               sctp_cmd_hb_timer_update(commands, transport);
+               sctp_transport_reset_hb_timer(transport);
        }
 
        if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
         * hold a reference on the transport to make sure none of
         * the needed data structures go away.
         */
-       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-               if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-                       sctp_transport_hold(t);
-       }
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t)
-{
-       /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of an HEARTBEAT ACK.  */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
                                  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
        /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
+       sctp_transport_reset_hb_timer(t);
 
        if (was_unconfirmed && asoc->peer.transport_count == 1)
                sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
                case SCTP_CMD_HB_TIMER_UPDATE:
                        t = cmd->obj.transport;
-                       sctp_cmd_hb_timer_update(commands, t);
+                       sctp_transport_reset_hb_timer(t);
                        break;
 
                case SCTP_CMD_HB_TIMERS_STOP:
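
The new block in sctp_generate_heartbeat_event() checks how long ago the transport last sent data; if that is more recent than a full heartbeat interval, the timer is pushed out again instead of emitting a heartbeat. Below is a standalone sketch of that reschedule-or-fire decision; the helper names and the 30-second interval are assumptions, not the SCTP code.

#include <stdio.h>
#include <time.h>

static struct timespec last_time_sent;	/* updated whenever data is sent */

static double heartbeat_interval(void)
{
	return 30.0;			/* assumed heartbeat interval, seconds */
}

/* Return 0 if a heartbeat should go out now, otherwise the number of
 * seconds the timer should be re-armed for. */
static double heartbeat_timer_fired(void)
{
	struct timespec now;
	double elapsed;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed = (now.tv_sec - last_time_sent.tv_sec) +
		  (now.tv_nsec - last_time_sent.tv_nsec) / 1e9;

	if (elapsed < heartbeat_interval())
		return heartbeat_interval() - elapsed;	/* re-arm, don't send */
	return 0;					/* send the heartbeat */
}

int main(void)
{
	clock_gettime(CLOCK_MONOTONIC, &last_time_sent);
	printf("re-arm for %.1fs\n", heartbeat_timer_fired());
	return 0;
}
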
index 9b6b48c7524e4b441a151b80f0babec81f539d49..81b86678be4d6fccc527d3c3e509c12576b2194c 100644 (file)
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+       unsigned long expires;
 
        /* When a data chunk is sent, reset the heartbeat interval.  */
-       if (!mod_timer(&transport->hb_timer,
-                      sctp_transport_timeout(transport)))
-           sctp_transport_hold(transport);
+       expires = jiffies + sctp_transport_timeout(transport);
+       if (time_before(transport->hb_timer.expires, expires) &&
+           !mod_timer(&transport->hb_timer,
+                      expires + prandom_u32_max(transport->rto)))
+               sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
        /* RTO + timer slack +/- 50% of RTO */
-       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+       unsigned long timeout = trans->rto >> 1;
 
        if (trans->state != SCTP_UNCONFIRMED &&
            trans->state != SCTP_PF)
                timeout += trans->hbinterval;
 
-       return timeout + jiffies;
+       return timeout;
 }
 
 /* Reset transport variables to their initial values */
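
sctp_transport_reset_hb_timer() now only moves the heartbeat timer further into the future (never earlier) and applies the random jitter at arming time, which is why sctp_transport_timeout() above loses both the jitter and the jiffies offset. A toy version of that push-forward-with-jitter logic is sketched here, with a wraparound-safe comparison in the spirit of the kernel's time_before(); every name is illustrative.

#include <stdio.h>
#include <stdlib.h>

#define time_before(a, b)	((long)((a) - (b)) < 0)

static unsigned long jiffies;		/* toy clock, advanced by hand */
static unsigned long hb_expires;	/* current heartbeat timer expiry */

static void reset_hb_timer(unsigned long timeout, unsigned long rto)
{
	unsigned long expires = jiffies + timeout;

	/* Only push the timer further out, plus random jitter so peers do
	 * not heartbeat in lockstep; never pull an armed timer earlier. */
	if (time_before(hb_expires, expires))
		hb_expires = expires + (rand() % (rto + 1));
}

int main(void)
{
	jiffies = 1000;
	reset_hb_timer(300, 50);
	printf("expires at %lu\n", hb_expires);

	jiffies = 1010;
	reset_hb_timer(100, 50);	/* earlier expiry: timer left alone */
	printf("expires at %lu\n", hb_expires);
	return 0;
}
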
index 045e11ecd332d0f811fbdd34cb15162cd5bff12f..244245bcbbd25554938ab099137799d47ef6791b 100644 (file)
@@ -78,6 +78,7 @@ krb5_encrypt(
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
+       skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -115,6 +116,7 @@ krb5_decrypt(
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
+       skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -946,7 +948,8 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                return PTR_ERR(hmac);
        }
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
@@ -1012,7 +1015,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                return PTR_ERR(hmac);
        }
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
index 71341ccb989043acd4c6d449a6d89d9db8b98513..65427492b1c95f681f79345e7ce29c62d941e147 100644 (file)
@@ -451,7 +451,8 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
                goto out_err_free_hmac;
 
 
-       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+                      GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate hash descriptor for '%s'\n",
                        __func__, ctx->gk5e->cksum_name);
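
The kmalloc() changes in the last two files account for the transform-specific context that follows struct shash_desc, sized by crypto_shash_descsize(); allocating only sizeof(*desc) under-allocates the buffer. The same flexible-array allocation pattern in plain C is shown below; the struct and sizes are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A descriptor whose trailing context size is only known at run time,
 * mirroring struct shash_desc plus crypto_shash_descsize() above. */
struct hash_desc {
	int flags;
	unsigned char ctx[];	/* flexible array member */
};

static struct hash_desc *hash_desc_alloc(size_t ctx_size)
{
	/* Allocate the header *plus* the transform-specific context; using
	 * only sizeof(*desc) here would overflow the buffer later. */
	struct hash_desc *desc = malloc(sizeof(*desc) + ctx_size);

	if (desc)
		memset(desc->ctx, 0, ctx_size);
	return desc;
}

int main(void)
{
	struct hash_desc *desc = hash_desc_alloc(64);

	if (!desc)
		return 1;
	printf("allocated %zu header bytes plus 64 context bytes\n",
	       sizeof(*desc));
	free(desc);
	return 0;
}
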
index 03a842870c52d22ca5c8e72ce063f78ba8391521..e2bdb07a49a2d6d95e55a051b048082934db9daf 100644 (file)
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
        if (err)
                goto out_nametbl;
 
+       INIT_LIST_HEAD(&tn->dist_queue);
        err = tipc_topsrv_start(net);
        if (err)
                goto out_subscr;
index 5504d63503df406f7dd1bb1245b85132fcd193ed..eff58dc53aa12b5d3644bd364b08f60f6d6cd7be 100644 (file)
@@ -103,6 +103,9 @@ struct tipc_net {
        spinlock_t nametbl_lock;
        struct name_table *nametbl;
 
+       /* Name dist queue */
+       struct list_head dist_queue;
+
        /* Topology subscription server */
        struct tipc_server *topsrv;
        atomic_t subscription_count;
index ebe9d0ff6e9e9220621676e3384df3a28477d7a9..6b626a64b5179e9b7d8fc909be203bdf6facfbdd 100644 (file)
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
        struct distr_item i;
        u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
        kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct distr_queue_item *e, *tmp;
+
+       spin_lock_bh(&tn->nametbl_lock);
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+               if (e->node != addr)
+                       continue;
+               list_del(&e->next);
+               kfree(e);
+       }
+       spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
                tipc_publ_purge(net, publ, addr);
+       tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+                                  u32 type, u32 node)
 {
        struct distr_queue_item *e;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        unsigned long now = get_jiffies_64();
 
        e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
        e->node = node;
        e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
        memcpy(e, i, sizeof(*i));
-       list_add_tail(&e->next, &tipc_dist_queue);
+       list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 void tipc_named_process_backlog(struct net *net)
 {
        struct distr_queue_item *e, *tmp;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr[16];
        unsigned long now = get_jiffies_64();
 
-       list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
                if (time_after(e->expires, now)) {
                        if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
                                continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
                node = msg_orignode(msg);
                while (count--) {
                        if (!tipc_update_nametbl(net, item, node, mtype))
-                               tipc_named_add_backlog(item, mtype, node);
+                               tipc_named_add_backlog(net, item, mtype, node);
                        item++;
                }
                kfree_skb(skb);
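
The name-distribution backlog moves from a single global list into the per-namespace struct tipc_net (initialised in tipc_init_net() earlier in this set of hunks), and tipc_dist_queue_purge() drops deferred entries from a node that went down. A compressed userspace sketch of per-instance state plus purge-by-owner follows, using a plain singly-linked list instead of list_head; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct item {
	unsigned int node;		/* originating node address */
	struct item *next;
};

struct net_instance {			/* stand-in for per-netns state */
	struct item *dist_queue;
};

static void queue_add(struct net_instance *tn, unsigned int node)
{
	struct item *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	e->node = node;
	e->next = tn->dist_queue;
	tn->dist_queue = e;
}

/* Drop every deferred update that came from a node that went down. */
static void queue_purge(struct net_instance *tn, unsigned int addr)
{
	struct item **pp = &tn->dist_queue;

	while (*pp) {
		if ((*pp)->node == addr) {
			struct item *dead = *pp;
			*pp = dead->next;
			free(dead);
		} else {
			pp = &(*pp)->next;
		}
	}
}

int main(void)
{
	struct net_instance tn = { .dist_queue = NULL };

	queue_add(&tn, 0x1001);
	queue_add(&tn, 0x1002);
	queue_add(&tn, 0x1001);
	queue_purge(&tn, 0x1001);
	printf("remaining head node: %#x\n",
	       tn.dist_queue ? tn.dist_queue->node : 0);
	queue_purge(&tn, 0x1002);
	return 0;
}
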
index 662bdd20a7489198fdbbcb00826f8a2782a62a50..56214736fe888090f569739e2e008c72d8785fb0 100644 (file)
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
        /* Retrieve the head sk_buff from the socket's receive queue. */
        err = 0;
        skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-       if (err)
-               return err;
-
        if (!skb)
-               return -EAGAIN;
+               return err;
 
        dg = (struct vmci_datagram *)skb->data;
        if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.3.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
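
skb_recv_datagram() reports trouble through both its return pointer and the err out-parameter; the reordered check above tests the pointer first and hands back whatever err holds, rather than manufacturing -EAGAIN. Below is a small sketch of that calling convention; the function names and the shutdown case are illustrative assumptions, not the vsock code.

#include <stdio.h>
#include <stdlib.h>

/* A toy receive helper in the style of skb_recv_datagram(): it returns a
 * buffer (or NULL) and reports the reason through *err. */
static void *recv_datagram(int shut_down, int *err)
{
	if (shut_down) {
		*err = 0;		/* no data and no error */
		return NULL;
	}
	*err = 0;
	return malloc(16);
}

static int dequeue(int shut_down)
{
	int err;
	void *buf = recv_datagram(shut_down, &err);

	/* Check the pointer first and propagate err as-is; "no buffer,
	 * err == 0" then surfaces as 0 rather than a synthetic -EAGAIN. */
	if (!buf)
		return err;

	free(buf);
	return 16;			/* pretend 16 bytes were consumed */
}

int main(void)
{
	printf("normal: %d, shutdown: %d\n", dequeue(0), dequeue(1));
	return 0;
}
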
index 98c924260b3d312055c598255cff8478bccba1e9..056a7307862b78f5f0b2013b41c2686b5de18064 100644 (file)
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
        struct wireless_dev *wdev;
        struct cfg80211_beacon_registration *reg, *tmp;
 
-       if (state != NETLINK_URELEASE)
+       if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
                return NOTIFY_DONE;
 
        rcu_read_lock();
index e000f44e37b8775f45023464b765a460d92d1204..c1b7ef3e24c1c055cc90d1e986b11308ff7d2e02 100644 (file)
@@ -650,7 +650,7 @@ int main(int argc, char **argv)
        }
 
        hdr = fopen(headername, "w");
-       if (!out) {
+       if (!hdr) {
                perror(headername);
                exit(1);
        }
index d1a4d697333077126f99f4951c00f647d06aaf5e..03c9872c31cfe4a2a723d3e4bb0f4896b313dc32 100644 (file)
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
                                int parm)
 {
-       int val;
+       unsigned int cmd, val;
 
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, true);
-       val = snd_hdac_read_parm(codec, nid, parm);
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, false);
+       cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+       if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
+               return -1;
        return val;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
index fb96aead825707e7bca496bf9fcd1af389873dbc..54babe1c0b1679b97ae43c3b745f0843b15ccfde 100644 (file)
@@ -267,6 +267,18 @@ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops
 }
 EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
 
+/* check whether intel graphics is present */
+static bool i915_gfx_present(void)
+{
+       static struct pci_device_id ids[] = {
+               { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
+                 .class = PCI_BASE_CLASS_DISPLAY << 16,
+                 .class_mask = 0xff << 16 },
+               {}
+       };
+       return pci_dev_present(ids);
+}
+
 /**
  * snd_hdac_i915_init - Initialize i915 audio component
  * @bus: HDA core bus
@@ -286,6 +298,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        struct i915_audio_component *acomp;
        int ret;
 
+       if (!i915_gfx_present())
+               return -ENODEV;
+
        acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
        if (!acomp)
                return -ENOMEM;
index bdbcd6b75ff61cb0495e36072eaefa2ec2478dc2..87041ddd29cbcbf2c63fca9ed90ffdbc0e5f249d 100644 (file)
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
 
 static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
-                       unsigned int *val)
+                       unsigned int *val, bool uncached)
 {
-       if (!codec->regmap)
+       if (uncached || !codec->regmap)
                return hda_reg_read(codec, reg, val);
        else
                return regmap_read(codec->regmap, reg, val);
 }
 
+static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val,
+                                     bool uncached)
+{
+       int err;
+
+       err = reg_raw_read(codec, reg, val, uncached);
+       if (err == -EAGAIN) {
+               err = snd_hdac_power_up_pm(codec);
+               if (!err)
+                       err = reg_raw_read(codec, reg, val, uncached);
+               snd_hdac_power_down_pm(codec);
+       }
+       return err;
+}
+
 /**
  * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
  * @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val)
 {
-       int err;
-
-       err = reg_raw_read(codec, reg, val);
-       if (err == -EAGAIN) {
-               err = snd_hdac_power_up_pm(codec);
-               if (!err)
-                       err = reg_raw_read(codec, reg, val);
-               snd_hdac_power_down_pm(codec);
-       }
-       return err;
+       return __snd_hdac_regmap_read_raw(codec, reg, val, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
 
+/* Works like snd_hdac_regmap_read_raw(), but reads the value directly
+ * via HD-audio verbs instead of going through the regmap cache.
+ */
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val)
+{
+       return __snd_hdac_regmap_read_raw(codec, reg, val, true);
+}
+
 /**
  * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
  * @codec: the codec object
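
The regmap change funnels both the cached and the uncached read through one internal helper that takes a flag and retries once, after powering the codec up, when the first attempt returns -EAGAIN; the exported functions become thin wrappers. Below is a self-contained sketch of that structure; all names and the fake register values are assumptions.

#include <stdio.h>

#define MY_EAGAIN	-11

static int device_suspended = 1;

static int raw_read(unsigned int reg, unsigned int *val, int uncached)
{
	(void)reg;
	if (device_suspended)
		return MY_EAGAIN;	/* can't touch the hardware yet */
	*val = uncached ? 0x1234 : 0xcafe;	/* pretend register data */
	return 0;
}

static int resume_device(void)
{
	device_suspended = 0;
	return 0;
}

static int __read_raw(unsigned int reg, unsigned int *val, int uncached)
{
	int err = raw_read(reg, val, uncached);

	if (err == MY_EAGAIN) {		/* power up and retry once */
		err = resume_device();
		if (!err)
			err = raw_read(reg, val, uncached);
	}
	return err;
}

static int read_raw(unsigned int reg, unsigned int *val)
{
	return __read_raw(reg, val, 0);	/* cached path */
}

static int read_raw_uncached(unsigned int reg, unsigned int *val)
{
	return __read_raw(reg, val, 1);	/* always hit the hardware */
}

int main(void)
{
	unsigned int v;

	if (!read_raw(0x20, &v))
		printf("cached read:   %#x\n", v);
	if (!read_raw_uncached(0x20, &v))
		printf("uncached read: %#x\n", v);
	return 0;
}
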
index 7b248cdf06e2166d82dfc69ea318247fc8eca6f7..fdcfa29e220551b473534938d8995c7abf7ebe84 100644 (file)
@@ -591,7 +591,7 @@ static int sscape_upload_microcode(struct snd_card *card, int version)
        }
        err = upload_dma_data(sscape, init_fw->data, init_fw->size);
        if (err == 0)
-               snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n",
+               snd_printk(KERN_INFO "sscape: MIDI firmware loaded %zu KBs\n",
                                init_fw->size >> 10);
 
        release_firmware(init_fw);
index 7ca5b89f088a6922e6acd09c12befae864994320..dfaf1a93fb8a3b8aba4fee3f3090a48b08ed2734 100644 (file)
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                                   bool allow_powerdown)
 {
        hda_nid_t nid, changed = 0;
-       int i, state;
+       int i, state, power;
 
        for (i = 0; i < path->depth; i++) {
                nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                        state = AC_PWRST_D0;
                else
                        state = AC_PWRST_D3;
-               if (!snd_hda_check_power_state(codec, nid, state)) {
+               power = snd_hda_codec_read(codec, nid, 0,
+                                          AC_VERB_GET_POWER_STATE, 0);
+               if (power != (state | (state << 4))) {
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE, state);
                        changed = nid;
index b680be0e937d9cd7b77b8e21365a73fe9a8c925b..637b8a0e2a91d3ab9b73ddc8a4b9fc4a24303e00 100644 (file)
@@ -2232,6 +2232,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+       /* Broxton-T */
+       { PCI_DEVICE(0x8086, 0x1a98),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index a47e8ae0eb308355416886d4753c9f950bd0cd56..80bbadc83721447754392238118eee98484616b6 100644 (file)
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
        int err;
+       int i;
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
        if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
        if (err < 0)
                return err;
 
+       /* keep the ADCs powered up when it's dynamically switchable */
+       if (spec->gen.dyn_adc_switch) {
+               unsigned int done = 0;
+               for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+                       int idx = spec->gen.dyn_adc_idx[i];
+                       if (done & (1 << idx))
+                               continue;
+                       snd_hda_gen_fix_pin_power(codec,
+                                                 spec->gen.adc_nids[idx]);
+                       done |= 1 << idx;
+               }
+       }
+
        return 0;
 }
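
The loop added to cs_parse_auto_config() visits every input but fixes up each dynamically switched ADC only once, tracking completed indices in a "done" bitmask. The same dedup-by-bitmask idiom in isolation (the input-to-ADC mapping below is invented):

#include <stdio.h>

static const int adc_idx_for_input[] = { 0, 1, 0, 2, 1 };

static void fix_adc(int idx)
{
	printf("fixing up ADC %d\n", idx);	/* runs once per ADC */
}

int main(void)
{
	unsigned int done = 0;
	unsigned int i;

	for (i = 0;
	     i < sizeof(adc_idx_for_input) / sizeof(adc_idx_for_input[0]);
	     i++) {
		int idx = adc_idx_for_input[i];

		if (done & (1u << idx))
			continue;		/* already handled this ADC */
		fix_adc(idx);
		done |= 1u << idx;
	}
	return 0;
}
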
 
index 5af372d018346751f1dc264b413218ff08adcd22..40933aa33afe337821570176e93cc9cbb71a8c28 100644 (file)
@@ -1396,7 +1396,6 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
-       struct hdmi_eld *pin_eld = &per_pin->sink_eld;
        hda_nid_t pin_nid = per_pin->pin_nid;
        /*
         * Always execute a GetPinSense verb here, even when called from
@@ -1413,15 +1412,15 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        present = snd_hda_pin_sense(codec, pin_nid);
 
        mutex_lock(&per_pin->lock);
-       pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
-       if (pin_eld->monitor_present)
+       eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+       if (eld->monitor_present)
                eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
        else
                eld->eld_valid = false;
 
        codec_dbg(codec,
                "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-               codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
+               codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
 
        if (eld->eld_valid) {
                if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
@@ -1441,7 +1440,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        else
                update_eld(codec, per_pin, eld);
 
-       ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
+       ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
        jack = snd_hda_jack_tbl_get(codec, pin_nid);
        if (jack)
@@ -1859,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
 
+       if (!per_pin)
+               return;
        mutex_lock(&per_pin->lock);
        per_pin->chmap_set = true;
        memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
index fefe83f2beabd662ecf882a832cfc66c03580bd2..810bceee4fd213bbd0b95acd03fa03cb15fe9c93 100644 (file)
@@ -4760,6 +4760,7 @@ enum {
        ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC280_FIXUP_HP_HEADSET_MIC,
        ALC221_FIXUP_HP_FRONT_MIC,
+       ALC292_FIXUP_TPT460,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5409,6 +5410,12 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
        },
+       [ALC292_FIXUP_TPT460] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_tpt440_dock,
+               .chained = true,
+               .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5442,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5563,7 +5571,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
-       SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+       SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -5658,6 +5666,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
        {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+       {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
        {}
 };
 #define ALC225_STANDARD_PINS \
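
The Realtek changes above are all additions to ID-keyed fixup tables: a new fixup definition, new PCI subsystem-ID quirks, and a model name alias. A minimal version of that table-plus-lookup pattern is sketched below; the IDs and fixup constants are illustrative, not the HDA driver's types.

#include <stdio.h>

struct pci_quirk {
	unsigned short subvendor;
	unsigned short subdevice;
	const char *name;
	int fixup;
};

#define FIXUP_NONE	0
#define FIXUP_TPT460	1

static const struct pci_quirk quirks[] = {
	{ 0x17aa, 0x2233, "Thinkpad", FIXUP_TPT460 },
	{ 0 }					/* terminator */
};

static int lookup_fixup(unsigned short subvendor, unsigned short subdevice)
{
	const struct pci_quirk *q;

	for (q = quirks; q->subvendor; q++)
		if (q->subvendor == subvendor && q->subdevice == subdevice)
			return q->fixup;
	return FIXUP_NONE;
}

int main(void)
{
	printf("fixup for 17aa:2233 -> %d\n", lookup_fixup(0x17aa, 0x2233));
	printf("fixup for 1028:0669 -> %d\n", lookup_fixup(0x1028, 0x0669));
	return 0;
}
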
index c5194f5b150aadb7d0b08cd5831aab2a03f2392d..d7e71f3092998cdaa3894adefdd9468c236bb24e 100644 (file)
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
        }
 
        pcxhr_msg_thread(mgr);
+       mutex_unlock(&mgr->lock);
        return IRQ_HANDLED;
 }
index ddca6547399b0103b37abbdf73cfa5c9e710a7fd..1f8fb0d904e059d4e6084d0c221489d598e9e4c1 100644 (file)
@@ -348,6 +348,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
        { 0 }   /* terminator */
 };
 
+/*
+ * The Dell USB dock with ALC4020 codec has a firmware problem: it gets
+ * messed up when zero volume is passed; just skip it as a workaround
+ */
+static const struct usbmix_name_map dell_alc4020_map[] = {
+       { 16, NULL },
+       { 19, NULL },
+       { 0 }
+};
+
 /*
  * Control map entries
  */
@@ -430,6 +440,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x0ccd, 0x0028),
                .map = aureon_51_2_map,
        },
+       {
+               .id = USB_ID(0x0bda, 0x4014),
+               .map = dell_alc4020_map,
+       },
        {
                .id = USB_ID(0x0dba, 0x1000),
                .map = mbox1_map,
index 6178bb5d07318ac7781b1f27fdb2b41a54f1f032..0adfd9537cf766bbe130c71c4cc029091b2d4118 100644 (file)
@@ -1134,9 +1134,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                return true;
        }
index 69bb3fc38fb2b71ba9f10070d7331bf8d2e158ce..0840684deb7d474578050f9c230fe9196e46a486 100644 (file)
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
index c658792d47b495badb3470e5d2f1143bbb94827a..0e5340742620bc332df4ad49737fac072738d338 100644 (file)
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644 (file)
index 0000000..90958aa
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time.  If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet.  However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s).  This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+       struct sockaddr_storage addr;
+       struct sockaddr_in  *addr4;
+       struct sockaddr_in6 *addr6;
+       int opt, i;
+
+       switch (family) {
+       case AF_INET:
+               addr4 = (struct sockaddr_in *)&addr;
+               addr4->sin_family = AF_INET;
+               addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+               addr4->sin_port = htons(PORT);
+               break;
+       case AF_INET6:
+               addr6 = (struct sockaddr_in6 *)&addr;
+               addr6->sin6_family = AF_INET6;
+               addr6->sin6_addr = in6addr_any;
+               addr6->sin6_port = htons(PORT);
+               break;
+       default:
+               error(1, 0, "Unsupported family %d", family);
+       }
+
+       for (i = 0; i < count; ++i) {
+               rcv_fds[i] = socket(family, proto, 0);
+               if (rcv_fds[i] < 0)
+                       error(1, errno, "failed to create receive socket");
+
+               opt = 1;
+               if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+                              sizeof(opt)))
+                       error(1, errno, "failed to set SO_REUSEPORT");
+
+               if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+                       error(1, errno, "failed to bind receive socket");
+
+               if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+                       error(1, errno, "failed to listen on receive port");
+       }
+}
+
+static void send_from_v4(int proto)
+{
+       struct sockaddr_in  saddr, daddr;
+       int fd;
+
+       saddr.sin_family = AF_INET;
+       saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+       saddr.sin_port = 0;
+
+       daddr.sin_family = AF_INET;
+       daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+       daddr.sin_port = htons(PORT);
+
+       fd = socket(AF_INET, proto, 0);
+       if (fd < 0)
+               error(1, errno, "failed to create send socket");
+
+       if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+               error(1, errno, "failed to bind send socket");
+
+       if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+               error(1, errno, "failed to connect send socket");
+
+       if (send(fd, "a", 1, 0) < 0)
+               error(1, errno, "failed to send message");
+
+       close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+       struct epoll_event ev;
+       int i, fd;
+       char buf[8];
+
+       i = epoll_wait(epfd, &ev, 1, -1);
+       if (i < 0)
+               error(1, errno, "epoll_wait failed");
+
+       if (proto == SOCK_STREAM) {
+               fd = accept(ev.data.fd, NULL, NULL);
+               if (fd < 0)
+                       error(1, errno, "failed to accept");
+               i = recv(fd, buf, sizeof(buf), 0);
+               close(fd);
+       } else {
+               i = recv(ev.data.fd, buf, sizeof(buf), 0);
+       }
+
+       if (i < 0)
+               error(1, errno, "failed to recv");
+
+       return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+       struct epoll_event ev;
+       int epfd, i, test_fd;
+       int test_family;
+       socklen_t len = sizeof(test_family);
+
+       epfd = epoll_create(1);
+       if (epfd < 0)
+               error(1, errno, "failed to create epoll");
+
+       ev.events = EPOLLIN;
+       for (i = 0; i < count; ++i) {
+               ev.data.fd = rcv_fds[i];
+               if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+                       error(1, errno, "failed to register sock epoll");
+       }
+
+       send_from_v4(proto);
+
+       test_fd = receive_once(epfd, proto);
+       if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+               error(1, errno, "failed to read socket domain");
+       if (test_family != AF_INET)
+               error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+                     test_family);
+
+       close(epfd);
+}
+
+int main(void)
+{
+       int rcv_fds[32], i;
+
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       /* NOTE: UDP socket lookups traverse a different code path when there
+        * are > 10 sockets in a group.
+        */
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "SUCCESS\n");
+       return 0;
+}
index a9ad4fe3f68f07848876403985e19e4c544ef1d0..9aaa35dd9144030464d1d038ea5b229c4bb33d0a 100644 (file)
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;
 
+       WARN_ON(!kvm_timer_should_fire(vcpu));
+
        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+       cycle_t cval, now;
+
+       cval = vcpu->arch.timer_cpu.cntv_cval;
+       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+       if (now < cval) {
+               u64 ns;
+
+               ns = cyclecounter_cyc2ns(timecounter->cc,
+                                        cval - now,
+                                        timecounter->mask,
+                                        &timecounter->frac);
+               return ns;
+       }
+
+       return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
        struct arch_timer_cpu *timer;
+       struct kvm_vcpu *vcpu;
+       u64 ns;
+
        timer = container_of(hrt, struct arch_timer_cpu, timer);
+       vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+       /*
+        * Check that the timer has really expired from the guest's
+        * PoV (NTP on the host may have forced it to expire
+        * early). If we should have slept longer, restart it.
+        */
+       ns = kvm_timer_compute_delta(vcpu);
+       if (unlikely(ns)) {
+               hrtimer_forward_now(hrt, ns_to_ktime(ns));
+               return HRTIMER_RESTART;
+       }
+
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
 }
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       u64 ns;
-       cycle_t cval, now;
 
        BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
                return;
 
        /*  The timer has not yet expired, schedule a background timer */
-       cval = timer->cntv_cval;
-       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-       ns = cyclecounter_cyc2ns(timecounter->cc,
-                                cval - now,
-                                timecounter->mask,
-                                &timecounter->frac);
-       timer_arm(timer, ns);
+       timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
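
The arch timer change factors the cycles-to-nanoseconds calculation into kvm_timer_compute_delta() and, when the hrtimer callback runs before the guest's deadline (for example after host clock adjustments), forwards the timer instead of firing. A userspace-flavoured sketch of that compute-and-maybe-restart flow follows; the counter frequency and all names are assumptions.

#include <stdint.h>
#include <stdio.h>

static uint64_t cval;			/* guest deadline, in counter cycles */
static uint64_t now;			/* current counter value */
static uint64_t ns_per_cycle = 40;	/* assumption: 25 MHz counter */

static uint64_t compute_delta_ns(void)
{
	if (now < cval)
		return (cval - now) * ns_per_cycle;
	return 0;			/* already expired */
}

/* Return 0 to mean "fire now", otherwise the ns to re-arm the timer for. */
static uint64_t timer_expired_callback(void)
{
	uint64_t ns = compute_delta_ns();

	if (ns)				/* woken up too early */
		return ns;		/* restart instead of firing */
	return 0;
}

int main(void)
{
	cval = 1000;
	now = 900;
	printf("re-arm for %llu ns\n",
	       (unsigned long long)timer_expired_callback());
	now = 1000;
	printf("re-arm for %llu ns\n",
	       (unsigned long long)timer_expired_callback());
	return 0;
}
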
index b5754c6c5508f9ac77aa73df6a908ae43ee00467..575c7aa30d7e64538bb72c11289c360c2fa884a4 100644 (file)
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
        u64 reg = 0;
 
-       if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+       if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
+       }
 
        return reg;
 }
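
The final hunk adds braces so the three mask operations are evaluated only when the PMU is enabled; without them the indentation was misleading and the extra register reads ran unconditionally (the returned value happened to be the same, since reg starts at 0). A small demonstration of that missing-braces pitfall, counting the side effects:

#include <stdio.h>

static int sysreg_reads;

static unsigned int read_sysreg(unsigned int val)
{
	sysreg_reads++;			/* count the (possibly costly) access */
	return val;
}

/* Without braces only the first line is guarded: the remaining register
 * reads still happen when the PMU is disabled. */
static unsigned int overflow_status_unbraced(int pmu_enabled)
{
	unsigned int reg = 0;

	if (pmu_enabled)
		reg = read_sysreg(0xf0);
	reg &= read_sysreg(0x30);
	reg &= read_sysreg(0x10);
	return reg;
}

static unsigned int overflow_status_braced(int pmu_enabled)
{
	unsigned int reg = 0;

	if (pmu_enabled) {
		reg = read_sysreg(0xf0);
		reg &= read_sysreg(0x30);
		reg &= read_sysreg(0x10);
	}
	return reg;
}

int main(void)
{
	sysreg_reads = 0;
	overflow_status_unbraced(0);
	printf("unbraced, PMU off: %d reads\n", sysreg_reads);

	sysreg_reads = 0;
	overflow_status_braced(0);
	printf("braced,   PMU off: %d reads\n", sysreg_reads);
	return 0;
}
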